Add more comments

Tim Allclair 2024-10-24 15:51:19 -07:00
parent 321eff34f7
commit d1f1bf200c
3 changed files with 13 additions and 6 deletions

View File

@@ -1788,7 +1788,10 @@ func (kl *Kubelet) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType
}
}
-if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) && !kubetypes.IsStaticPod(pod) {
+// handlePodResourcesResize updates the pod to use the allocated resources. This should come
+// before the main business logic of SyncPod, so that a consistent view of the pod is used
+// across the sync loop.
+if kuberuntime.IsInPlacePodVerticalScalingAllowed(pod) {
// Handle pod resize here instead of doing it in HandlePodUpdates because
// this conveniently retries any Deferred resize requests
// TODO(vinaykul,InPlacePodVerticalScaling): Investigate doing this in HandlePodUpdates + periodic SyncLoop scan
@@ -1976,7 +1979,9 @@ func (kl *Kubelet) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType
}
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) && isPodResizeInProgress(pod, podStatus) {
-// While resize is in progress, periodically call PLEG to update pod cache
+// While resize is in progress, periodically request the latest status from the runtime via
+// the PLEG. This is necessary since ordinarily pod status is only fetched when a container
+// undergoes a state transition.
runningPod := kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), podStatus)
if err, _ := kl.pleg.UpdateCache(&runningPod, pod.UID); err != nil {
klog.ErrorS(err, "Failed to update pod cache", "pod", klog.KObj(pod))
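
The isPodResizeInProgress check that gates this PLEG refresh is not part of this diff. As a rough, hypothetical sketch of the idea (not the actual helper), such a check can compare the resources allocated in the pod spec against the resources the runtime reports for each running container; while they still differ, the resize is in progress and the cache has to be refreshed explicitly:

package kubelet // illustrative placement only

import (
	v1 "k8s.io/api/core/v1"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

// podResizeStillInProgress is a hypothetical sketch, not the helper referenced above:
// a resize is treated as in progress while any running container's runtime-reported
// CPU request differs from the request allocated in the pod spec.
func podResizeStillInProgress(pod *v1.Pod, podStatus *kubecontainer.PodStatus) bool {
	for _, c := range pod.Spec.Containers {
		cs := podStatus.FindContainerStatusByName(c.Name)
		if cs == nil || cs.State != kubecontainer.ContainerStateRunning || cs.Resources == nil {
			continue
		}
		// Only the CPU request is compared here; a fuller check would also cover
		// the CPU and memory limits.
		if cs.Resources.CPURequest != nil &&
			cs.Resources.CPURequest.Cmp(*c.Resources.Requests.Cpu()) != 0 {
			return true
		}
	}
	return false
}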

View File

@@ -539,7 +539,7 @@ func containerSucceeded(c *v1.Container, podStatus *kubecontainer.PodStatus) boo
return cStatus.State == kubecontainer.ContainerStateExited && cStatus.ExitCode == 0
}
-func isInPlacePodVerticalScalingAllowed(pod *v1.Pod) bool {
+func IsInPlacePodVerticalScalingAllowed(pod *v1.Pod) bool {
if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
return false
}
@@ -927,7 +927,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *
}
}
-if isInPlacePodVerticalScalingAllowed(pod) {
+if IsInPlacePodVerticalScalingAllowed(pod) {
changes.ContainersToUpdate = make(map[v1.ResourceName][]containerToUpdateInfo)
}
@@ -985,7 +985,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *
// If the container failed the startup probe, we should kill it.
message = fmt.Sprintf("Container %s failed startup probe", container.Name)
reason = reasonStartupProbe
-} else if isInPlacePodVerticalScalingAllowed(pod) && !m.computePodResizeAction(pod, idx, containerStatus, &changes) {
+} else if IsInPlacePodVerticalScalingAllowed(pod) && !m.computePodResizeAction(pod, idx, containerStatus, &changes) {
// computePodResizeAction updates 'changes' if resize policy requires restarting this container
continue
} else {
@@ -1302,7 +1302,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po
}
// Step 7: For containers in podContainerChanges.ContainersToUpdate[CPU,Memory] list, invoke UpdateContainerResources
-if isInPlacePodVerticalScalingAllowed(pod) {
+if IsInPlacePodVerticalScalingAllowed(pod) {
if len(podContainerChanges.ContainersToUpdate) > 0 || podContainerChanges.UpdatePodResources {
m.doPodResizeAction(pod, podStatus, podContainerChanges, result)
}
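
The rename in this file turns the package-private isInPlacePodVerticalScalingAllowed into the exported IsInPlacePodVerticalScalingAllowed, which is what lets kubelet.go (first file above) call kuberuntime.IsInPlacePodVerticalScalingAllowed instead of repeating the feature-gate and static-pod condition inline. Only the feature-gate half of the body is visible in the hunk above; assuming the helper also covers the static-pod exclusion from the condition it replaces in kubelet.go, it would read roughly like this (the IsStaticPod call and its import alias are assumptions, and the sketch belongs in the same file, which already has the needed imports):

// Sketch of the full gate; only the feature-gate check appears in the hunk above,
// and the static-pod check is inferred from the replaced condition in kubelet.go.
func IsInPlacePodVerticalScalingAllowed(pod *v1.Pod) bool {
	if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
		return false
	}
	if kubetypes.IsStaticPod(pod) {
		return false
	}
	return true
}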

View File

@@ -37,6 +37,8 @@ const (
// multiplied by 10 (barring exceptional cases) + a configurable quantity which is between -1000
// and 1000. Containers with higher OOM scores are killed if the system runs out of memory.
// See https://lwn.net/Articles/391222/ for more information.
+// OOMScoreAdjust should be calculated based on the allocated resources, so the pod argument should
+// contain the allocated resources in the spec.
func GetContainerOOMScoreAdjust(pod *v1.Pod, container *v1.Container, memoryCapacity int64) int {
if types.IsNodeCriticalPod(pod) {
// Only node critical pod should be the last to get killed.
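
The added comment says the OOM score adjust must be derived from the allocated resources. The reason is the burstable calculation further down in this function (not shown in this hunk), which scales inversely with the container's memory request: a larger allocated request lowers the adjust, making the container a less likely OOM-kill target. A simplified sketch of that relationship, with guaranteed/critical handling and clamping omitted (the exact constants in the real function may differ):

// burstableOOMScoreAdjust is a simplified illustration, not the exact function body:
// the adjust shrinks as the allocated memory request grows relative to node capacity,
// which is why a resized (allocated) request has to be reflected in the pod argument.
func burstableOOMScoreAdjust(memoryRequest, memoryCapacity int64) int {
	return int(1000 - (1000*memoryRequest)/memoryCapacity)
}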