Fix slow reconcile when quickly reverting resize patch

commit 68fcc9cf8a
parent 9c571abeec
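In short: when the kubelet accepts a pod resize, it now resets the container-start backoff for every container whose resource requests changed, so a resize that is quickly patched back does not sit out a stale crash-loop backoff before being reconciled. To make the backoff key reachable from kubelet.go, the kuberuntime helper getStableKey is exported as GetStableKey.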
pkg/kubelet/kubelet.go

@@ -50,6 +50,7 @@ import (
 	"k8s.io/utils/ptr"
 
 	v1 "k8s.io/api/core/v1"
+	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
@@ -2898,6 +2899,12 @@ func (kl *Kubelet) handlePodResourcesResize(pod *v1.Pod, podStatus *kubecontaine
 		if err := kl.statusManager.SetPodAllocation(pod); err != nil {
 			return nil, err
 		}
+		for i, container := range pod.Spec.Containers {
+			if !apiequality.Semantic.DeepEqual(container.Resources, allocatedPod.Spec.Containers[i].Resources) {
+				key := kuberuntime.GetStableKey(pod, &container)
+				kl.backOff.Reset(key)
+			}
+		}
 		allocatedPod = pod
 	}
 	if resizeStatus != "" {
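As an aside, the loop above uses apiequality.Semantic.DeepEqual rather than reflect.DeepEqual because semantic equality compares resource.Quantity values, not their string forms. A minimal standalone sketch of that behavior (the example values are illustrative, not from the commit):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	a := v1.ResourceRequirements{
		Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
	}
	b := v1.ResourceRequirements{
		Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1000m")},
	}
	// "1" CPU and "1000m" CPU parse to equal Quantities, so semantically
	// equal resources would not trigger a backoff reset in the loop above.
	fmt.Println(apiequality.Semantic.DeepEqual(a, b)) // true
}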
pkg/kubelet/kubelet_test.go

@@ -2676,6 +2676,7 @@ func TestHandlePodResourcesResize(t *testing.T) {
 		newRequestsAllocated bool // Whether the new requests have already been allocated (but not actuated)
 		expectedAllocations  v1.ResourceList
 		expectedResize       v1.PodResizeStatus
+		expectBackoffReset   bool
 		goos                 string
 	}{
 		{
@@ -2684,6 +2685,7 @@ func TestHandlePodResourcesResize(t *testing.T) {
 			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
 			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
 			expectedResize:      v1.PodResizeStatusInProgress,
+			expectBackoffReset:  true,
 		},
 		{
 			name: "Request CPU increase, memory decrease - expect InProgress",
@@ -2691,6 +2693,7 @@ func TestHandlePodResourcesResize(t *testing.T) {
 			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu1500m, v1.ResourceMemory: mem500M},
 			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1500m, v1.ResourceMemory: mem500M},
 			expectedResize:      v1.PodResizeStatusInProgress,
+			expectBackoffReset:  true,
 		},
 		{
 			name: "Request CPU decrease, memory increase - expect InProgress",
@@ -2698,6 +2701,7 @@ func TestHandlePodResourcesResize(t *testing.T) {
 			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem1500M},
 			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem1500M},
 			expectedResize:      v1.PodResizeStatusInProgress,
+			expectBackoffReset:  true,
 		},
 		{
 			name: "Request CPU and memory increase beyond current capacity - expect Deferred",
@@ -2788,6 +2792,11 @@ func TestHandlePodResourcesResize(t *testing.T) {
 				}
 			}
 
+			now := kubelet.clock.Now()
+			// Put the container in backoff so we can confirm backoff is reset.
+			backoffKey := kuberuntime.GetStableKey(tt.pod, &tt.pod.Spec.Containers[0])
+			kubelet.backOff.Next(backoffKey, now)
+
 			updatedPod, err := kubelet.handlePodResourcesResize(newPod, podStatus)
 			require.NoError(t, err)
 			assert.Equal(t, tt.expectedAllocations, updatedPod.Spec.Containers[0].Resources.Requests, "updated pod spec resources")
@@ -2798,6 +2807,13 @@ func TestHandlePodResourcesResize(t *testing.T) {
 
 			resizeStatus := kubelet.statusManager.GetPodResizeStatus(newPod.UID)
 			assert.Equal(t, tt.expectedResize, resizeStatus)
+
+			isInBackoff := kubelet.backOff.IsInBackOffSince(backoffKey, now)
+			if tt.expectBackoffReset {
+				assert.False(t, isInBackoff, "container backoff should be reset")
+			} else {
+				assert.True(t, isInBackoff, "container backoff should not be reset")
+			}
 		})
 	}
 }
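The seed-then-assert pattern above can be reproduced in isolation. A minimal sketch, assuming the flowcontrol.Backoff type backing kl.backOff (k8s.io/client-go/util/flowcontrol) and a fake clock from k8s.io/utils/clock/testing; the key string is made up:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
	testingclock "k8s.io/utils/clock/testing"
)

func main() {
	fakeClock := testingclock.NewFakeClock(time.Now())
	backOff := flowcontrol.NewFakeBackOff(time.Second, time.Minute, fakeClock)
	key := "mypod_default_1234-uid_web_a1b2c3d4" // illustrative stable key

	now := fakeClock.Now()
	backOff.Next(key, now) // seed backoff, as the test does before the resize
	fmt.Println(backOff.IsInBackOffSince(key, now)) // true

	backOff.Reset(key) // what handlePodResourcesResize now does on a resource change
	fmt.Println(backOff.IsInBackOffSince(key, now)) // false
}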
pkg/kubelet/kuberuntime/helpers.go

@@ -172,10 +172,10 @@ func isInitContainerFailed(status *kubecontainer.Status) bool {
 	return false
 }
 
-// getStableKey generates a key (string) to uniquely identify a
+// GetStableKey generates a key (string) to uniquely identify a
 // (pod, container) tuple. The key should include the content of the
 // container, so that any change to the container generates a new key.
-func getStableKey(pod *v1.Pod, container *v1.Container) string {
+func GetStableKey(pod *v1.Pod, container *v1.Container) string {
 	hash := strconv.FormatUint(kubecontainer.HashContainer(container), 16)
 	return fmt.Sprintf("%s_%s_%s_%s_%s", pod.Name, pod.Namespace, string(pod.UID), container.Name, hash)
 }
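The generated key is just the Sprintf above applied to pod and container identity plus the container hash; a toy illustration with made-up values:

package main

import "fmt"

func main() {
	// Mirrors the format string in GetStableKey; the final term is the
	// hex-encoded container hash, so any change to the container spec
	// (image, resources, ...) produces a different key.
	key := fmt.Sprintf("%s_%s_%s_%s_%s", "mypod", "default", "1234-uid", "web", "a1b2c3d4")
	fmt.Println(key) // mypod_default_1234-uid_web_a1b2c3d4
}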
pkg/kubelet/kuberuntime/helpers_test.go

@@ -113,11 +113,11 @@ func TestStableKey(t *testing.T) {
 			Containers: []v1.Container{*container},
 		},
 	}
-	oldKey := getStableKey(pod, container)
+	oldKey := GetStableKey(pod, container)
 
 	// Updating the container image should change the key.
 	container.Image = "foo/image:v2"
-	newKey := getStableKey(pod, container)
+	newKey := GetStableKey(pod, container)
 	assert.NotEqual(t, oldKey, newKey)
 }
 
pkg/kubelet/kuberuntime/kuberuntime_manager.go

@@ -1433,7 +1433,7 @@ func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Contain
 	// Use the finished time of the latest exited container as the start point to calculate whether to do back-off.
 	ts := cStatus.FinishedAt
 	// backOff requires a unique key to identify the container.
-	key := getStableKey(pod, container)
+	key := GetStableKey(pod, container)
 	if backOff.IsInBackOffSince(key, ts) {
 		if containerRef, err := kubecontainer.GenerateContainerRef(pod, container); err == nil {
 			m.recorder.Eventf(containerRef, v1.EventTypeWarning, events.BackOffStartContainer,
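Design note: because the key embeds the container hash, backoff entries are tied to a specific container spec. A reverted resize returns the pod to a spec whose key may still hold accumulated backoff, which is what made the reconcile slow; resetting that key whenever the allocation changes clears it, and exporting GetStableKey keeps handlePodResourcesResize and doBackOff deriving the key the same way.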