Fix null pointer access in doPodResizeAction for kubeletonly mode

vinay kulkarni 2023-03-12 05:59:14 +00:00
parent ead7d66ee1
commit 1c7850c355
2 changed files with 16 additions and 3 deletions
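In kubelet-only (standalone) mode the resize path lands on the cm stub, whose GetPodCgroupConfig previously returned a nil *ResourceConfig together with a nil error, so doPodResizeAction could dereference a nil config and panic. The hunks below address this from three sides: the stub methods now return an explicit error, doPodResizeAction guards the pointer-valued fields of the desired pod resources before using them, and in-place resize is skipped for static pods. As a rough illustration of the failure mode, here is a hypothetical caller sketch (not the actual kubelet code; pcm is a cm.PodContainerManager and pod a *v1.Pod, as in doPodResizeAction):

	// Hypothetical sketch: with the old stub, cfg == nil and err == nil, so the
	// field access cfg.Memory below panics with a nil pointer dereference; with
	// the new stub, err is non-nil and the resize can be failed cleanly instead.
	cfg, err := pcm.GetPodCgroupConfig(pod, v1.ResourceMemory)
	if err != nil {
		return err // kubelet-only mode now takes this branch
	}
	currentLimit := *cfg.Memory // panics when cfg is nil
	_ = currentLimit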


@@ -17,6 +17,8 @@ limitations under the License.
 package cm
 
 import (
+	"fmt"
+
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/klog/v2"
@@ -96,11 +98,11 @@ func (cm *containerManagerStub) GetDevicePluginResourceCapacity() (v1.ResourceLi
 }
 
 func (m *podContainerManagerStub) GetPodCgroupConfig(_ *v1.Pod, _ v1.ResourceName) (*ResourceConfig, error) {
-	return nil, nil
+	return nil, fmt.Errorf("not implemented")
 }
 
 func (m *podContainerManagerStub) SetPodCgroupConfig(_ *v1.Pod, _ v1.ResourceName, _ *ResourceConfig) error {
-	return nil
+	return fmt.Errorf("not implemented")
 }
 
 func (cm *containerManagerStub) NewPodContainerManager() PodContainerManager {
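A minimal sketch of the new stub behavior, written as a hypothetical in-package test (not part of this commit): callers now get a "not implemented" error instead of a silent nil config.

package cm

import (
	"testing"

	v1 "k8s.io/api/core/v1"
)

// Hypothetical test, not part of this commit: the stubs should fail fast
// rather than hand back a nil *ResourceConfig with a nil error.
func TestPodContainerManagerStubNotImplemented(t *testing.T) {
	m := &podContainerManagerStub{}
	if _, err := m.GetPodCgroupConfig(&v1.Pod{}, v1.ResourceMemory); err == nil {
		t.Fatal("expected an error from GetPodCgroupConfig, got nil")
	}
	if err := m.SetPodCgroupConfig(&v1.Pod{}, v1.ResourceMemory, nil); err == nil {
		t.Fatal("expected an error from SetPodCgroupConfig, got nil")
	}
}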


@@ -697,6 +697,11 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podStatus *ku
 		return err
 	}
 	if len(podContainerChanges.ContainersToUpdate[v1.ResourceMemory]) > 0 || podContainerChanges.UpdatePodResources {
+		if podResources.Memory == nil {
+			klog.ErrorS(nil, "podResources.Memory is nil", "pod", pod.Name)
+			result.Fail(fmt.Errorf("podResources.Memory is nil for pod %s", pod.Name))
+			return
+		}
 		currentPodMemoryConfig, err := pcm.GetPodCgroupConfig(pod, v1.ResourceMemory)
 		if err != nil {
 			klog.ErrorS(err, "GetPodCgroupConfig for memory failed", "pod", pod.Name)
@@ -720,6 +725,11 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podStatus *ku
 		}
 	}
 	if len(podContainerChanges.ContainersToUpdate[v1.ResourceCPU]) > 0 || podContainerChanges.UpdatePodResources {
+		if podResources.CPUQuota == nil || podResources.CPUShares == nil {
+			klog.ErrorS(nil, "podResources.CPUQuota or podResources.CPUShares is nil", "pod", pod.Name)
+			result.Fail(fmt.Errorf("podResources.CPUQuota or podResources.CPUShares is nil for pod %s", pod.Name))
+			return
+		}
 		currentPodCpuConfig, err := pcm.GetPodCgroupConfig(pod, v1.ResourceCPU)
 		if err != nil {
 			klog.ErrorS(err, "GetPodCgroupConfig for CPU failed", "pod", pod.Name)
@@ -941,6 +951,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *
 			message = fmt.Sprintf("Container %s failed startup probe", container.Name)
 			reason = reasonStartupProbe
 		} else if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) &&
+			!types.IsStaticPod(pod) &&
 			!m.computePodResizeAction(pod, idx, containerStatus, &changes) {
 			// computePodResizeAction updates 'changes' if resize policy requires restarting this container
 			continue
@@ -1213,7 +1224,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po
 	}
 
 	// Step 7: For containers in podContainerChanges.ContainersToUpdate[CPU,Memory] list, invoke UpdateContainerResources
-	if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
+	if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) && !types.IsStaticPod(pod) {
 		if len(podContainerChanges.ContainersToUpdate) > 0 || podContainerChanges.UpdatePodResources {
 			m.doPodResizeAction(pod, podStatus, podContainerChanges, result)
 		}
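Taken together, the kuberuntime hunks apply a single guard pattern: check every pointer-valued field of the pod-level ResourceConfig before dereferencing it, and fail the pod sync result rather than panic. A condensed sketch of that pattern as a standalone helper (hypothetical, not part of this commit; it assumes the fmt, v1, and k8s.io/kubernetes/pkg/kubelet/cm imports already used by this package):

// validatePodResources is a hypothetical helper mirroring the nil checks
// added above; ResourceConfig.Memory, CPUQuota, and CPUShares are pointers
// and may legitimately be unset.
func validatePodResources(podResources *cm.ResourceConfig, pod *v1.Pod) error {
	if podResources == nil {
		return fmt.Errorf("unable to get resource configuration for pod %s", pod.Name)
	}
	if podResources.Memory == nil {
		return fmt.Errorf("podResources.Memory is nil for pod %s", pod.Name)
	}
	if podResources.CPUQuota == nil || podResources.CPUShares == nil {
		return fmt.Errorf("podResources.CPUQuota or podResources.CPUShares is nil for pod %s", pod.Name)
	}
	return nil
}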