Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-30 06:54:01 +00:00)

Always use allocated resources for pods that don't support resize

parent 6df3ea46d9
commit 460db5c137
@@ -1883,7 +1883,7 @@ func (kl *Kubelet) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType
 	// handlePodResourcesResize updates the pod to use the allocated resources. This should come
 	// before the main business logic of SyncPod, so that a consistent view of the pod is used
 	// across the sync loop.
-	if kuberuntime.IsInPlacePodVerticalScalingAllowed(pod) {
+	if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
 		// Handle pod resize here instead of doing it in HandlePodUpdates because
 		// this conveniently retries any Deferred resize requests
 		// TODO(vinaykul,InPlacePodVerticalScaling): Investigate doing this in HandlePodUpdates + periodic SyncLoop scan
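The gate in SyncPod is now only the feature-gate check; the per-pod support checks (Windows, static pods) move into handlePodResourcesResize further down in this diff. For readers unfamiliar with how such gates are evaluated, here is a minimal, self-contained sketch using k8s.io/component-base/featuregate. The gate name MyResizeFeature is invented for illustration; the kubelet itself checks features.InPlacePodVerticalScaling through utilfeature.DefaultFeatureGate.

    package main

    import (
    	"fmt"

    	"k8s.io/component-base/featuregate"
    )

    // myResizeFeature is a made-up gate name used only for this sketch.
    const myResizeFeature featuregate.Feature = "MyResizeFeature"

    func main() {
    	// Build a private gate rather than touching a shared default gate.
    	gate := featuregate.NewFeatureGate()
    	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
    		myResizeFeature: {Default: false, PreRelease: featuregate.Beta},
    	}); err != nil {
    		panic(err)
    	}

    	fmt.Println(gate.Enabled(myResizeFeature)) // false: the default

    	// Roughly what --feature-gates=MyResizeFeature=true does for a component.
    	if err := gate.SetFromMap(map[string]bool{string(myResizeFeature): true}); err != nil {
    		panic(err)
    	}
    	fmt.Println(gate.Enabled(myResizeFeature)) // true
    }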
@@ -2816,10 +2816,6 @@ func (kl *Kubelet) HandlePodSyncs(pods []*v1.Pod) {
 // pod should hold the desired (pre-allocated) spec.
 // Returns true if the resize can proceed.
 func (kl *Kubelet) canResizePod(pod *v1.Pod) (bool, v1.PodResizeStatus, string) {
-	if goos == "windows" {
-		return false, v1.PodResizeStatusInfeasible, "Resizing Windows pods is not supported"
-	}
-
 	if v1qos.GetPodQOS(pod) == v1.PodQOSGuaranteed && !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScalingExclusiveCPUs) {
 		if utilfeature.DefaultFeatureGate.Enabled(features.CPUManager) {
 			if kl.containerManager.GetNodeConfig().CPUManagerPolicy == "static" {
@@ -2877,6 +2873,27 @@ func (kl *Kubelet) canResizePod(pod *v1.Pod) (bool, v1.PodResizeStatus, string)
 // the allocation decision and pod status.
 func (kl *Kubelet) handlePodResourcesResize(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (*v1.Pod, error) {
 	allocatedPod, updated := kl.allocationManager.UpdatePodFromAllocation(pod)
+	// Keep this logic in sync with kuberuntime.isInPlacePodVerticalScalingAllowed
+	if goos == "windows" || kubetypes.IsStaticPod(pod) {
+		if updated {
+			// A resize is requested but not supported.
+			var msg string
+			switch {
+			case goos == "windows":
+				msg = "Resizing Windows pods is not supported"
+			case kubetypes.IsStaticPod(pod):
+				msg = "Resizing static pods is not supported"
+			default:
+				msg = "Resizing this pod is not supported"
+			}
+			kl.recorder.Eventf(pod, v1.EventTypeWarning, events.ResizeInfeasible, msg)
+			kl.statusManager.SetPodResizeStatus(pod.UID, v1.PodResizeStatusInfeasible)
+		} else {
+			kl.statusManager.SetPodResizeStatus(pod.UID, "")
+		}
+		return allocatedPod, nil
+	}
+
 	if !updated {
 		// Desired resources == allocated resources. Check whether a resize is in progress.
 		resizeInProgress := !allocatedResourcesMatchStatus(allocatedPod, podStatus)
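As a plain illustration of the branch added above: a pod that cannot be resized keeps running with its previously allocated resources, and its resize status is only set to Infeasible when the spec actually asked for a change; otherwise any stale status is cleared. The sketch below is a toy model with invented names (decideResizeStatus and its arguments), not kubelet code.

    package main

    import "fmt"

    // decideResizeStatus models the new early-return branch in
    // handlePodResourcesResize: unsupported pods never enter the resize logic,
    // and their status is either Infeasible (a change was requested) or cleared
    // (the spec already matches the allocation).
    func decideResizeStatus(resizeUnsupported, specDiffersFromAllocation bool) string {
    	if resizeUnsupported {
    		if specDiffersFromAllocation {
    			return "Infeasible"
    		}
    		return "" // nothing pending; clear any stale status
    	}
    	return "evaluate" // supported pods continue into canResizePod / allocation
    }

    func main() {
    	fmt.Println(decideResizeStatus(true, true))  // Infeasible
    	fmt.Println(decideResizeStatus(true, false)) // (cleared)
    	fmt.Println(decideResizeStatus(false, true)) // evaluate
    }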
@@ -1748,10 +1748,6 @@ func getPhase(pod *v1.Pod, info []v1.ContainerStatus, podIsTerminal bool) v1.Pod
 }
 
 func (kl *Kubelet) determinePodResizeStatus(allocatedPod *v1.Pod, podStatus *kubecontainer.PodStatus, podIsTerminal bool) v1.PodResizeStatus {
-	if kubetypes.IsStaticPod(allocatedPod) {
-		return ""
-	}
-
 	// If pod is terminal, clear the resize status.
 	if podIsTerminal {
 		kl.statusManager.SetPodResizeStatus(allocatedPod.UID, "")
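With the static-pod special case removed, determinePodResizeStatus no longer re-derives anything for such pods; the status recorded elsewhere (here, the Infeasible value set in handlePodResourcesResize) is what ends up reported. A toy model of that flow, with invented names (statusStore, set, get) standing in for the status manager:

    package main

    import "fmt"

    // statusStore is a toy stand-in for per-pod resize-status bookkeeping.
    type statusStore map[string]string

    func (s statusStore) set(uid, status string) { s[uid] = status }
    func (s statusStore) get(uid string) string  { return s[uid] }

    func main() {
    	store := statusStore{}

    	// handlePodResourcesResize path for an unsupported (e.g. static) pod:
    	store.set("static-pod-uid", "Infeasible")

    	// determinePodResizeStatus path: no static-pod special case anymore,
    	// just report whatever was recorded.
    	fmt.Println(store.get("static-pod-uid")) // Infeasible
    }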
@@ -2712,6 +2712,7 @@ func TestHandlePodResourcesResize(t *testing.T) {
 		expectedResize        v1.PodResizeStatus
 		expectBackoffReset    bool
 		goos                  string
+		annotations           map[string]string
 	}{
 		{
 			name: "Request CPU and memory decrease - expect InProgress",
@@ -2788,6 +2789,14 @@ func TestHandlePodResourcesResize(t *testing.T) {
 			expectedResize:        v1.PodResizeStatusInfeasible,
 			goos:                  "windows",
 		},
+		{
+			name:                  "static pod, expect Infeasible",
+			originalRequests:      v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
+			newRequests:           v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+			expectedAllocatedReqs: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
+			expectedResize:        v1.PodResizeStatusInfeasible,
+			annotations:           map[string]string{kubetypes.ConfigSourceAnnotationKey: kubetypes.FileSource},
+		},
 		{
 			name:             "Increase CPU from min shares",
 			originalRequests: v1.ResourceList{v1.ResourceCPU: cpu2m},
@@ -2889,6 +2898,7 @@ func TestHandlePodResourcesResize(t *testing.T) {
 				originalPod = testPod1.DeepCopy()
 				originalCtr = &originalPod.Spec.Containers[0]
 			}
+			originalPod.Annotations = tt.annotations
 			originalCtr.Resources.Requests = tt.originalRequests
 			originalCtr.Resources.Limits = tt.originalLimits
 
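The new test case marks the pod as static through its config-source annotation. Below is a minimal stand-alone sketch of how that annotation drives the static-pod check; isStaticPod here is a simplified stand-in for kubetypes.IsStaticPod, and the annotation key and "file"/"api" values mirror the kubelet's kubetypes constants (assuming the k8s.io/api and k8s.io/apimachinery modules are available).

    package main

    import (
    	"fmt"

    	v1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // configSourceAnnotationKey mirrors kubetypes.ConfigSourceAnnotationKey.
    const configSourceAnnotationKey = "kubernetes.io/config.source"

    // isStaticPod is a simplified stand-in for kubetypes.IsStaticPod: a pod is
    // static when its config source annotation is present and not "api".
    func isStaticPod(pod *v1.Pod) bool {
    	source, ok := pod.Annotations[configSourceAnnotationKey]
    	return ok && source != "api"
    }

    func main() {
    	staticPod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
    		Name:        "etcd-node1",
    		Annotations: map[string]string{configSourceAnnotationKey: "file"},
    	}}
    	apiPod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "web"}}

    	fmt.Println(isStaticPod(staticPod)) // true: resize reported as Infeasible
    	fmt.Println(isStaticPod(apiPod))    // false: resize handled normally
    }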
@@ -551,7 +551,7 @@ func containerSucceeded(c *v1.Container, podStatus *kubecontainer.PodStatus) boo
 	return cStatus.State == kubecontainer.ContainerStateExited && cStatus.ExitCode == 0
 }
 
-func IsInPlacePodVerticalScalingAllowed(pod *v1.Pod) bool {
+func isInPlacePodVerticalScalingAllowed(pod *v1.Pod) bool {
 	return utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) &&
 		!types.IsStaticPod(pod) &&
 		runtime.GOOS != "windows"
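The comment added in handlePodResourcesResize asks that its windows/static check stay in sync with this now-unexported predicate. One way to guard that is to compare the two decisions over the same inputs; the sketch below does so for simplified stand-ins (allowedRuntime and unsupportedKubelet are invented names), not the real functions.

    package main

    import (
    	"fmt"
    	"runtime"
    )

    // allowedRuntime mirrors the shape of isInPlacePodVerticalScalingAllowed:
    // feature gate on, not a static pod, not Windows.
    func allowedRuntime(gateOn, static bool) bool {
    	return gateOn && !static && runtime.GOOS != "windows"
    }

    // unsupportedKubelet mirrors the early-return condition added to
    // handlePodResourcesResize: Windows or static pod.
    func unsupportedKubelet(static bool) bool {
    	return runtime.GOOS == "windows" || static
    }

    func main() {
    	// With the gate on, the two checks should be exact complements.
    	for _, static := range []bool{false, true} {
    		if allowedRuntime(true, static) == unsupportedKubelet(static) {
    			fmt.Println("checks drifted out of sync for static =", static)
    		} else {
    			fmt.Println("checks agree for static =", static)
    		}
    	}
    }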
@@ -561,7 +561,7 @@ func IsInPlacePodVerticalScalingAllowed(pod *v1.Pod) bool {
 // Returns whether to keep (true) or restart (false) the container.
 // TODO(vibansal): Make this function to be agnostic to whether it is dealing with a restartable init container or not (i.e. remove the argument `isRestartableInitContainer`).
 func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containerIdx int, isRestartableInitContainer bool, kubeContainerStatus *kubecontainer.Status, changes *podActions) (keepContainer bool) {
-	if !IsInPlacePodVerticalScalingAllowed(pod) {
+	if !isInPlacePodVerticalScalingAllowed(pod) {
 		return true
 	}
 
@@ -998,7 +998,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *
 		}
 	}
 
-	if IsInPlacePodVerticalScalingAllowed(pod) {
+	if isInPlacePodVerticalScalingAllowed(pod) {
 		changes.ContainersToUpdate = make(map[v1.ResourceName][]containerToUpdateInfo)
 	}
 
@@ -1414,7 +1414,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po
 	}
 
 	// Step 7: For containers in podContainerChanges.ContainersToUpdate[CPU,Memory] list, invoke UpdateContainerResources
-	if IsInPlacePodVerticalScalingAllowed(pod) {
+	if isInPlacePodVerticalScalingAllowed(pod) {
 		if len(podContainerChanges.ContainersToUpdate) > 0 || podContainerChanges.UpdatePodResources {
 			m.doPodResizeAction(pod, podContainerChanges, &result)
 		}