Fix build error

vivzbansal 2024-11-08 05:54:30 +00:00
parent 1eb966cb45
commit 1cf4587277
11 changed files with 97 additions and 54 deletions

View File

@@ -1314,7 +1314,7 @@ func MarkPodProposedForResize(oldPod, newPod *api.Pod) {
continue
}
newPod.Status.Resize = api.PodResizeStatusProposed
-break
+return
}
}
}
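The `break` → `return` swap above changes control flow when the status assignment sits inside nested or sequential loops: `break` leaves only the innermost loop, while `return` ends the function as soon as the status is set. A minimal standalone sketch of that difference, using hypothetical stand-ins (`markProposed`, plain string slices) rather than the real `api.Pod` types:

```go
package main

import "fmt"

// markProposed mimics the shape of MarkPodProposedForResize: walk container
// lists and mark the pod once any container's resources changed.
func markProposed(containers, initContainers []string, changed map[string]bool) string {
	for _, name := range containers {
		if !changed[name] {
			continue
		}
		// A `break` here would only exit this loop and fall through to the
		// init-container loop below; `return` stops the whole scan.
		return "Proposed"
	}
	for _, name := range initContainers {
		if !changed[name] {
			continue
		}
		return "Proposed"
	}
	return ""
}

func main() {
	changed := map[string]bool{"app": true}
	fmt.Println(markProposed([]string{"app"}, []string{"sidecar"}, changed)) // Proposed
}
```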

View File

@@ -245,11 +245,20 @@ func setDefaultResizePolicy(obj *v1.Container) {
})
}
}
-if _, exists := obj.Resources.Requests[v1.ResourceCPU]; exists {
-defaultResizePolicy(v1.ResourceCPU)
-}
+if !resizePolicySpecified[v1.ResourceCPU] {
+if _, exists := obj.Resources.Requests[v1.ResourceCPU]; exists {
+defaultResizePolicy(v1.ResourceCPU)
+} else if _, exists := obj.Resources.Limits[v1.ResourceCPU]; exists {
+defaultResizePolicy(v1.ResourceCPU)
+}
+}
-if _, exists := obj.Resources.Requests[v1.ResourceMemory]; exists {
-defaultResizePolicy(v1.ResourceMemory)
-}
+if !resizePolicySpecified[v1.ResourceMemory] {
+if _, exists := obj.Resources.Requests[v1.ResourceMemory]; exists {
+defaultResizePolicy(v1.ResourceMemory)
+} else if _, exists := obj.Resources.Limits[v1.ResourceMemory]; exists {
+defaultResizePolicy(v1.ResourceMemory)
+}
+}
}
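Per the new hunk, a default resize policy is applied only when the user did not already specify one for that resource, and limits are now consulted as well as requests (a container may set only limits, with requests defaulted from them). A small self-contained sketch of that decision logic, with simplified stand-in types rather than the real v1 API:

```go
package main

import "fmt"

// Simplified stand-ins for the v1 types; illustrative only.
type ResourceName string

const (
	ResourceCPU    ResourceName = "cpu"
	ResourceMemory ResourceName = "memory"
)

type ResourceList map[ResourceName]string

// defaultsFor mirrors the defaulting pattern above: a resource gets a default
// resize policy only if no policy was specified for it and it appears in
// either requests or limits.
func defaultsFor(requests, limits ResourceList, specified map[ResourceName]bool) []ResourceName {
	var defaulted []ResourceName
	for _, r := range []ResourceName{ResourceCPU, ResourceMemory} {
		if specified[r] {
			continue
		}
		if _, ok := requests[r]; ok {
			defaulted = append(defaulted, r)
		} else if _, ok := limits[r]; ok {
			defaulted = append(defaulted, r)
		}
	}
	return defaulted
}

func main() {
	// A limits-only container now gets a CPU default too.
	fmt.Println(defaultsFor(nil, ResourceList{ResourceCPU: "500m"}, nil)) // [cpu]
}
```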

View File

@@ -3195,13 +3195,12 @@ func TestSetDefaultResizePolicy(t *testing.T) {
}
output := roundTrip(t, runtime.Object(&testPod))
pod2 := output.(*v1.Pod)
-if !cmp.Equal(pod2.Spec.Containers[0].ResizePolicy, tc.expectedResizePolicy) {
-t.Errorf("expected resize policy %+v, but got %+v for normal containers", tc.expectedResizePolicy, pod2.Spec.Containers[0].ResizePolicy)
-}
if isSidecarContainer {
if !cmp.Equal(pod2.Spec.InitContainers[0].ResizePolicy, tc.expectedResizePolicy) {
t.Errorf("expected resize policy %+v, but got %+v for restartable init containers", tc.expectedResizePolicy, pod2.Spec.InitContainers[0].ResizePolicy)
}
+} else if !cmp.Equal(pod2.Spec.Containers[0].ResizePolicy, tc.expectedResizePolicy) {
+t.Errorf("expected resize policy %+v, but got %+v for normal containers", tc.expectedResizePolicy, pod2.Spec.Containers[0].ResizePolicy)
}
})
}

View File

@@ -5289,7 +5289,6 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
for ix, container := range mungedPodSpec.Containers {
container.Image = oldPod.Spec.Containers[ix].Image // +k8s:verify-mutation:reason=clone
newContainers = append(newContainers, container)
}
mungedPodSpec.Containers = newContainers
// munge spec.initContainers[*].image
@@ -5644,10 +5643,7 @@ func ValidatePodResize(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
originalCPUMemPodSpec := *newPod.Spec.DeepCopy()
var newContainers []core.Container
for ix, container := range originalCPUMemPodSpec.Containers {
-lim := dropCPUMemoryUpdates(container.Resources.Limits, oldPod.Spec.Containers[ix].Resources.Limits)
-req := dropCPUMemoryUpdates(container.Resources.Requests, oldPod.Spec.Containers[ix].Resources.Requests)
-container.Resources = core.ResourceRequirements{Limits: lim, Requests: req}
-container.ResizePolicy = oldPod.Spec.Containers[ix].ResizePolicy // +k8s:verify-mutation:reason=clone
+dropCPUMemoryResourcesFromContainer(&container, &oldPod.Spec.Containers[ix])
newContainers = append(newContainers, container)
}
originalCPUMemPodSpec.Containers = newContainers
@@ -5657,10 +5653,7 @@ func ValidatePodResize(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
for ix, container := range originalCPUMemPodSpec.InitContainers {
if container.RestartPolicy != nil && *container.RestartPolicy == core.ContainerRestartPolicyAlways { // restartable init container
-lim := dropCPUMemoryUpdates(container.Resources.Limits, oldPod.Spec.InitContainers[ix].Resources.Limits)
-req := dropCPUMemoryUpdates(container.Resources.Requests, oldPod.Spec.InitContainers[ix].Resources.Requests)
-container.Resources = core.ResourceRequirements{Limits: lim, Requests: req}
-container.ResizePolicy = oldPod.Spec.InitContainers[ix].ResizePolicy // +k8s:verify-mutation:reason=clone
+dropCPUMemoryResourcesFromContainer(&container, &oldPod.Spec.InitContainers[ix])
}
newInitContainers = append(newInitContainers, container)
}
@@ -5683,25 +5676,32 @@ func ValidatePodResize(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
return allErrs
}
-func dropCPUMemoryUpdates(resourceList, oldResourceList core.ResourceList) core.ResourceList {
-if oldResourceList == nil {
-return nil
-}
+// dropCPUMemoryResourcesFromContainer deletes the cpu and memory resources from the container, and copies them from the old pod container resources if present.
+func dropCPUMemoryResourcesFromContainer(container *core.Container, oldPodSpecContainer *core.Container) {
+dropCPUMemoryUpdates := func(resourceList, oldResourceList core.ResourceList) core.ResourceList {
+if oldResourceList == nil {
+return nil
+}
+var mungedResourceList core.ResourceList
+if resourceList == nil {
+mungedResourceList = make(core.ResourceList)
+} else {
+mungedResourceList = resourceList.DeepCopy()
+}
+delete(mungedResourceList, core.ResourceCPU)
+delete(mungedResourceList, core.ResourceMemory)
+if cpu, found := oldResourceList[core.ResourceCPU]; found {
+mungedResourceList[core.ResourceCPU] = cpu
+}
+if mem, found := oldResourceList[core.ResourceMemory]; found {
+mungedResourceList[core.ResourceMemory] = mem
+}
+return mungedResourceList
+}
-var mungedResourceList core.ResourceList
-if resourceList == nil {
-mungedResourceList = make(core.ResourceList)
-} else {
-mungedResourceList = resourceList.DeepCopy()
-}
-delete(mungedResourceList, core.ResourceCPU)
-delete(mungedResourceList, core.ResourceMemory)
-if cpu, found := oldResourceList[core.ResourceCPU]; found {
-mungedResourceList[core.ResourceCPU] = cpu
-}
-if mem, found := oldResourceList[core.ResourceMemory]; found {
-mungedResourceList[core.ResourceMemory] = mem
-}
-return mungedResourceList
+lim := dropCPUMemoryUpdates(container.Resources.Limits, oldPodSpecContainer.Resources.Limits)
+req := dropCPUMemoryUpdates(container.Resources.Requests, oldPodSpecContainer.Resources.Requests)
+container.Resources = core.ResourceRequirements{Limits: lim, Requests: req}
+container.ResizePolicy = oldPodSpecContainer.ResizePolicy // +k8s:verify-mutation:reason=clone
}
// isPodResizeRequestSupported checks whether the pod is running on a node with InPlacePodVerticalScaling enabled.
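The extracted helper keeps the original munging semantics: any cpu or memory values in the incoming container are discarded and the old values, if present, are copied back, so only non-cpu/memory changes survive for the rest of the resize validation. A rough re-implementation of the inner `dropCPUMemoryUpdates` closure with plain string maps, for illustration only:

```go
package main

import "fmt"

// dropCPUMemoryUpdates sketches the closure above with plain maps: cpu and
// memory entries from the new list are dropped and the old values (if any)
// are restored, leaving other resources untouched.
func dropCPUMemoryUpdates(newList, oldList map[string]string) map[string]string {
	if oldList == nil {
		return nil
	}
	munged := make(map[string]string, len(newList))
	for k, v := range newList {
		munged[k] = v
	}
	delete(munged, "cpu")
	delete(munged, "memory")
	for _, k := range []string{"cpu", "memory"} {
		if v, ok := oldList[k]; ok {
			munged[k] = v
		}
	}
	return munged
}

func main() {
	newList := map[string]string{"cpu": "2", "memory": "2Gi", "ephemeral-storage": "1Gi"}
	oldList := map[string]string{"cpu": "1", "memory": "1Gi"}
	// cpu/memory revert to the old values; the ephemeral-storage change survives.
	fmt.Println(dropCPUMemoryUpdates(newList, oldList))
}
```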
@@ -8630,3 +8630,11 @@ func validateImageVolumeSource(imageVolume *core.ImageVolumeSource, fldPath *fie
allErrs = append(allErrs, validatePullPolicy(imageVolume.PullPolicy, fldPath.Child("pullPolicy"))...)
return allErrs
}
+// isRestartableInitContainer returns true if the container has ContainerRestartPolicyAlways.
+func isRestartableInitContainer(initContainer *core.Container) bool {
+if initContainer == nil || initContainer.RestartPolicy == nil {
+return false
+}
+return *initContainer.RestartPolicy == core.ContainerRestartPolicyAlways
+}
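The added helper is deliberately nil-safe on both the container pointer and its RestartPolicy field. A tiny usage sketch with simplified stand-in types (not the real core package):

```go
package main

import "fmt"

// Stand-ins for core.ContainerRestartPolicy / core.Container.
type ContainerRestartPolicy string

const ContainerRestartPolicyAlways ContainerRestartPolicy = "Always"

type Container struct{ RestartPolicy *ContainerRestartPolicy }

// Same shape as the helper added above: a nil container or an unset policy
// simply means "not restartable".
func isRestartableInitContainer(c *Container) bool {
	if c == nil || c.RestartPolicy == nil {
		return false
	}
	return *c.RestartPolicy == ContainerRestartPolicyAlways
}

func main() {
	always := ContainerRestartPolicyAlways
	fmt.Println(isRestartableInitContainer(&Container{RestartPolicy: &always})) // true
	fmt.Println(isRestartableInitContainer(&Container{}))                       // false
	fmt.Println(isRestartableInitContainer(nil))                                // false
}
```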

View File

@@ -26053,12 +26053,6 @@ func TestValidatePodResize(t *testing.T) {
test.new.Namespace = "namespace"
test.old.Namespace = "namespace"
}
-if test.isSideCarCtr {
-if test.new.Spec.InitContainers == nil && test.old.Spec.InitContainers == nil {
-test.new.Spec.InitContainers = []core.Container{{Name: "autoadded", Image: "image", TerminationMessagePolicy: "File", ImagePullPolicy: "Always"}}
-test.old.Spec.InitContainers = []core.Container{{Name: "autoadded", Image: "image", TerminationMessagePolicy: "File", ImagePullPolicy: "Always"}}
-}
-}
if test.new.Spec.Containers == nil && test.old.Spec.Containers == nil {
test.new.Spec.Containers = []core.Container{{Name: "autoadded", Image: "image", TerminationMessagePolicy: "File", ImagePullPolicy: "Always"}}
test.old.Spec.Containers = []core.Container{{Name: "autoadded", Image: "image", TerminationMessagePolicy: "File", ImagePullPolicy: "Always"}}

View File

@@ -27,7 +27,6 @@ import (
"k8s.io/kubernetes/pkg/kubelet/metrics"
podresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
-podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)
// v1PodResourcesServer implements PodResourcesListerServer

View File

@@ -1780,6 +1780,7 @@ func allocatedResourcesMatchStatus(allocatedPod *v1.Pod, podStatus *kubecontaine
return true
}
+// allocatedContainerResourcesMatchStatus returns true if the container resources matches with the container statuses resources.
func allocatedContainerResourcesMatchStatus(allocatedPod *v1.Pod, c *v1.Container, podStatus *kubecontainer.PodStatus) bool {
if cs := podStatus.FindContainerStatusByName(c.Name); cs != nil {
if cs.State != kubecontainer.ContainerStateRunning {

View File

@@ -51,7 +51,6 @@ import (
remote "k8s.io/cri-client/pkg"
kubelettypes "k8s.io/kubelet/pkg/types"
-podutil "k8s.io/kubernetes/pkg/api/v1/pod"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
@@ -1072,14 +1071,12 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod
// If the container is previously initialized but its status is not
// found, it means its last status is removed for some reason.
// Restart it if it is a restartable init container.
-if isPreviouslyInitialized && podutil.IsRestartableInitContainer(container) {
if isPreviouslyInitialized && podutil.IsRestartableInitContainer(container) {
changes.InitContainersToStart = append(changes.InitContainersToStart, i)
}
continue
}
-if isPreviouslyInitialized && !podutil.IsRestartableInitContainer(container) {
if isPreviouslyInitialized && !podutil.IsRestartableInitContainer(container) {
// after initialization, only restartable init containers need to be kept
// running
@@ -1096,7 +1093,6 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod
changes.InitContainersToStart = append(changes.InitContainersToStart, i)
case kubecontainer.ContainerStateRunning:
-if !podutil.IsRestartableInitContainer(container) {
if !podutil.IsRestartableInitContainer(container) {
break
}
@@ -1173,7 +1169,6 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod
// If the init container failed and the restart policy is Never, the pod is terminal.
// Otherwise, restart the init container.
case kubecontainer.ContainerStateExited:
-if podutil.IsRestartableInitContainer(container) {
if podutil.IsRestartableInitContainer(container) {
changes.InitContainersToStart = append(changes.InitContainersToStart, i)
} else { // init container
@@ -1197,7 +1192,6 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod
}
default: // kubecontainer.ContainerStatusUnknown or other unknown states
-if podutil.IsRestartableInitContainer(container) {
if podutil.IsRestartableInitContainer(container) {
// If the restartable init container is in unknown state, restart it.
changes.ContainersToKill[status.ID] = containerToKillInfo{

View File

@@ -750,9 +750,12 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podContainerC
}
}
if len(podContainerChanges.InitContainersToUpdate[rName]) > 0 {
-if err = m.updatePodContainersResources(pod, rName, podContainerChanges.InitContainersToUpdate[rName], true); err != nil {
-klog.ErrorS(err, "updatePodContainersResources failed for init containers", "pod", format.Pod(pod), "resource", rName)
-return err
+updateContainerResults, errUpdate := m.updatePodContainerResources(ctx, pod, rName, podContainerChanges.InitContainersToUpdate[rName], true)
+for _, containerResult := range updateContainerResults {
+result.AddSyncResult(containerResult)
}
+if errUpdate != nil {
+return errUpdate
+}
}
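The rewritten call records a SyncResult for every attempted init-container update before checking the error, so partial progress shows up in the pod's sync result even when the resize fails midway. A rough sketch of that accumulate-then-fail pattern, with minimal stand-ins for the kubecontainer types:

```go
package main

import (
	"errors"
	"fmt"
)

// Minimal stand-ins for kubecontainer.SyncResult / PodSyncResult.
type SyncResult struct {
	Action string
	Target string
	Error  error
}

type PodSyncResult struct{ SyncResults []*SyncResult }

func (r *PodSyncResult) AddSyncResult(s *SyncResult) {
	r.SyncResults = append(r.SyncResults, s)
}

// updateContainers yields one result per attempted container and stops at the
// first failure, mirroring the pattern in the hunk above.
func updateContainers(names []string, failAt int) ([]*SyncResult, error) {
	var results []*SyncResult
	for i, name := range names {
		res := &SyncResult{Action: "UpdateContainerCPU", Target: name}
		results = append(results, res)
		if i == failAt {
			res.Error = errors.New("update failed")
			return results, res.Error
		}
	}
	return results, nil
}

func main() {
	var result PodSyncResult
	updateResults, errUpdate := updateContainers([]string{"init-a", "init-b", "init-c"}, 1)
	for _, containerResult := range updateResults {
		result.AddSyncResult(containerResult)
	}
	// Two results were recorded even though the update failed.
	fmt.Println(len(result.SyncResults), errUpdate)
}
```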

View File

@@ -2821,9 +2821,11 @@ func TestUpdatePodRestartableInitContainerResources(t *testing.T) {
apiSpecResources []v1.ResourceRequirements
apiStatusResources []v1.ResourceRequirements
requiresRestart []bool
+injectedError error
invokeUpdateResources bool
expectedCurrentLimits []v1.ResourceList
expectedCurrentRequests []v1.ResourceList
+expectedResults []*kubecontainer.SyncResult
}{
"Guaranteed QoS Pod - CPU & memory resize requested, update CPU": {
resourceName: v1.ResourceCPU,
@@ -2841,6 +2843,20 @@ func TestUpdatePodRestartableInitContainerResources(t *testing.T) {
invokeUpdateResources: true,
expectedCurrentLimits: []v1.ResourceList{res150m100Mi, res250m200Mi, res350m300Mi},
expectedCurrentRequests: []v1.ResourceList{res150m100Mi, res250m200Mi, res350m300Mi},
+expectedResults: []*kubecontainer.SyncResult{
+{
+Action: kubecontainer.UpdateContainerCPU,
+Target: pod.Spec.InitContainers[0].Name,
+},
+{
+Action: kubecontainer.UpdateContainerCPU,
+Target: pod.Spec.InitContainers[1].Name,
+},
+{
+Action: kubecontainer.UpdateContainerCPU,
+Target: pod.Spec.InitContainers[2].Name,
+},
+},
},
"Guaranteed QoS Pod - CPU & memory resize requested, update memory": {
resourceName: v1.ResourceMemory,
@@ -2858,12 +2874,27 @@ func TestUpdatePodRestartableInitContainerResources(t *testing.T) {
invokeUpdateResources: true,
expectedCurrentLimits: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi},
expectedCurrentRequests: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi},
+expectedResults: []*kubecontainer.SyncResult{
+{
+Action: kubecontainer.UpdateContainerMemory,
+Target: pod.Spec.InitContainers[0].Name,
+},
+{
+Action: kubecontainer.UpdateContainerMemory,
+Target: pod.Spec.InitContainers[1].Name,
+},
+{
+Action: kubecontainer.UpdateContainerMemory,
+Target: pod.Spec.InitContainers[2].Name,
+},
+},
},
} {
var initContainersToUpdate []containerToUpdateInfo
for idx := range pod.Spec.InitContainers {
// default resize policy when pod resize feature is enabled
pod.Spec.InitContainers[idx].Resources = tc.apiSpecResources[idx]
pod.Status.ContainerStatuses[idx].Resources = &tc.apiStatusResources[idx]
cInfo := containerToUpdateInfo{
apiContainerIdx: idx,
kubeContainerID: kubecontainer.ContainerID{},
@@ -2883,8 +2914,14 @@ func TestUpdatePodRestartableInitContainerResources(t *testing.T) {
initContainersToUpdate = append(initContainersToUpdate, cInfo)
}
fakeRuntime.Called = []string{}
-err := m.updatePodContainersResources(pod, tc.resourceName, initContainersToUpdate, true)
-require.NoError(t, err, dsc)
+updateContainerResults, err := m.updatePodContainerResources(context.TODO(), pod, tc.resourceName, initContainersToUpdate, true)
+assert.ElementsMatch(t, tc.expectedResults, updateContainerResults)
+if tc.injectedError == nil {
+require.NoError(t, err, dsc)
+} else {
+require.EqualError(t, err, tc.injectedError.Error(), dsc)
+}
if tc.invokeUpdateResources {
assert.Contains(t, fakeRuntime.Called, "UpdateContainerResources", dsc)

View File

@@ -323,7 +323,6 @@ func dropNonResizeUpdates(newPod, oldPod *api.Pod) *api.Pod {
pod.Spec.InitContainers[idx].ResizePolicy = ctr.ResizePolicy
}
}
return pod
}