Merge pull request #124220 from HirazawaUi/fix-pod-restarted
[kubelet]: fixed container restart due to pod spec field changes
Commit dad8fe71f2
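
Summary of the change shown in the diff below: HashContainer no longer hashes the entire v1.Container spec. It now marshals only the fields that affect a running container, name and image, collected by the new pickFieldsToHash helper, so pod-spec fields that are added or defaulted differently across kubelet versions no longer change the hash and no longer trigger container restarts. Since resources are no longer part of the hash, HashContainerWithoutResources, the HashWithoutResources fields, and the io.kubernetes.container.hashWithoutResources annotation are removed, and the restart check in computePodActions becomes a single hash comparison.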
@@ -68,8 +68,7 @@ var (
 	}
 	`
 
-	sampleV115HashValue = uint64(0x311670a)
-	sampleV116HashValue = sampleV115HashValue
+	sampleV131HashValue = uint64(0x8e45cbd0)
 )
 
 func TestConsistentHashContainer(t *testing.T) {
@@ -79,11 +78,7 @@ func TestConsistentHashContainer(t *testing.T) {
 	}
 
 	currentHash := HashContainer(container)
-	if currentHash != sampleV116HashValue {
-		t.Errorf("mismatched hash value with v1.16")
-	}
-
-	if currentHash != sampleV115HashValue {
-		t.Errorf("mismatched hash value with v1.15")
+	if currentHash != sampleV131HashValue {
+		t.Errorf("mismatched hash value with v1.31")
 	}
 }
@@ -110,28 +110,20 @@ func ShouldContainerBeRestarted(container *v1.Container, pod *v1.Pod, podStatus
 // Note: remember to update hashValues in container_hash_test.go as well.
 func HashContainer(container *v1.Container) uint64 {
 	hash := fnv.New32a()
-	// Omit nil or empty field when calculating hash value
-	// Please see https://github.com/kubernetes/kubernetes/issues/53644
-	containerJSON, _ := json.Marshal(container)
+	containerJSON, _ := json.Marshal(pickFieldsToHash(container))
 	hashutil.DeepHashObject(hash, containerJSON)
 	return uint64(hash.Sum32())
 }
 
-// HashContainerWithoutResources returns the hash of the container with Resources field zero'd out.
-// InPlacePodVerticalScaling enables mutable Resources field.
-// Changes to this field may not require container restart depending on policy.
-func HashContainerWithoutResources(container *v1.Container) uint64 {
-	// Compute hash over fields besides the Resources field
-	// NOTE: This is needed during alpha and beta so that containers using Resources but
-	// not subject to In-place resize are not unexpectedly restarted when
-	// InPlacePodVerticalScaling feature-gate is toggled.
-	//TODO(vinaykul,InPlacePodVerticalScaling): Remove this in GA+1 and make HashContainerWithoutResources to become Hash.
-	hashWithoutResources := fnv.New32a()
-	containerCopy := container.DeepCopy()
-	containerCopy.Resources = v1.ResourceRequirements{}
-	containerJSON, _ := json.Marshal(containerCopy)
-	hashutil.DeepHashObject(hashWithoutResources, containerJSON)
-	return uint64(hashWithoutResources.Sum32())
+// pickFieldsToHash pick fields that will affect the running status of the container for hash,
+// currently this field range only contains `image` and `name`.
+// Note: this list must be updated if ever kubelet wants to allow mutations to other fields.
+func pickFieldsToHash(container *v1.Container) map[string]string {
+	retval := map[string]string{
+		"name":  container.Name,
+		"image": container.Image,
+	}
+	return retval
 }
 
 // envVarsToMap constructs a map of environment name to value from a slice
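
To make the effect concrete, here is a minimal, self-contained sketch. It is not the kubelet code: it uses a stand-in container struct and hashes the marshalled map directly with FNV-32a instead of going through hashutil.DeepHashObject, but it shows the same idea, that only name and image feed the hash, so any other spec change leaves the hash (and therefore the restart decision) untouched.

package main

import (
	"encoding/json"
	"fmt"
	"hash/fnv"
)

// container is a stand-in for v1.Container with only the fields used here.
type container struct {
	Name  string
	Image string
	Env   []string
}

// hashContainer mirrors the idea behind the new HashContainer: marshal only the
// fields that affect the running container, then hash the JSON bytes with FNV-32a.
func hashContainer(c container) uint64 {
	fields := map[string]string{
		"name":  c.Name,
		"image": c.Image,
	}
	b, _ := json.Marshal(fields) // map keys are sorted, so the output is deterministic
	h := fnv.New32a()
	h.Write(b)
	return uint64(h.Sum32())
}

func main() {
	base := container{Name: "app", Image: "busybox:1.36"}
	withEnv := container{Name: "app", Image: "busybox:1.36", Env: []string{"FOO=bar"}}
	newImage := container{Name: "app", Image: "busybox:1.37"}

	fmt.Println(hashContainer(base) == hashContainer(withEnv))  // true: Env is not part of the hash
	fmt.Println(hashContainer(base) == hashContainer(newImage)) // false: image changes the hash
}

Running the sketch prints true (an added Env value does not change the hash) and then false (a new image does).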
@@ -276,7 +268,6 @@ func ConvertPodStatusToRunningPod(runtimeName string, podStatus *PodStatus) Pod
 			ImageRef:             containerStatus.ImageRef,
 			ImageRuntimeHandler:  containerStatus.ImageRuntimeHandler,
 			Hash:                 containerStatus.Hash,
-			HashWithoutResources: containerStatus.HashWithoutResources,
 			State:                containerStatus.State,
 		}
 		runningPod.Containers = append(runningPod.Containers, container)
@@ -657,7 +657,7 @@ func TestHashContainer(t *testing.T) {
 				"echo abc",
 			},
 			containerPort: int32(8001),
-			expectedHash:  uint64(0x3c42280f),
+			expectedHash:  uint64(0x8e45cbd0),
 		},
 	}
 
@@ -938,7 +938,7 @@ func TestHashContainerWithoutResources(t *testing.T) {
 				},
 				ResizePolicy: []v1.ContainerResizePolicy{cpuPolicyRestartRequired, memPolicyRestartNotRequired},
 			},
-			0x5f62cb4c,
+			0x11a6d6d6,
 		},
 		{
 			"Burstable pod with memory policy restart required",
@@ -951,7 +951,7 @@ func TestHashContainerWithoutResources(t *testing.T) {
 				},
 				ResizePolicy: []v1.ContainerResizePolicy{cpuPolicyRestartNotRequired, memPolicyRestartRequired},
 			},
-			0xcdab9e00,
+			0x11a6d6d6,
 		},
 		{
 			"Guaranteed pod with CPU policy restart required",
@@ -964,7 +964,7 @@ func TestHashContainerWithoutResources(t *testing.T) {
 				},
 				ResizePolicy: []v1.ContainerResizePolicy{cpuPolicyRestartRequired, memPolicyRestartNotRequired},
 			},
-			0x5f62cb4c,
+			0x11a6d6d6,
 		},
 		{
 			"Guaranteed pod with memory policy restart required",
@@ -977,13 +977,13 @@ func TestHashContainerWithoutResources(t *testing.T) {
 				},
 				ResizePolicy: []v1.ContainerResizePolicy{cpuPolicyRestartNotRequired, memPolicyRestartRequired},
 			},
-			0xcdab9e00,
+			0x11a6d6d6,
 		},
 	}
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
 			containerCopy := tc.container.DeepCopy()
-			hash := HashContainerWithoutResources(tc.container)
+			hash := HashContainer(tc.container)
 			assert.Equal(t, tc.expectedHash, hash, "[%s]", tc.name)
 			assert.Equal(t, containerCopy, tc.container, "[%s]", tc.name)
 		})
@@ -295,11 +295,6 @@ type Container struct {
 	// Hash of the container, used for comparison. Optional for containers
 	// not managed by kubelet.
 	Hash uint64
-	// Hash of the container over fields with Resources field zero'd out.
-	// NOTE: This is needed during alpha and beta so that containers using Resources are
-	// not unexpectedly restarted when InPlacePodVerticalScaling feature-gate is toggled.
-	//TODO(vinaykul,InPlacePodVerticalScaling): Remove this in GA+1 and make HashWithoutResources to become Hash.
-	HashWithoutResources uint64
 	// State is the state of the container.
 	State State
 }
@@ -365,8 +360,6 @@ type Status struct {
 	ImageRuntimeHandler string
 	// Hash of the container, used for comparison.
 	Hash uint64
-	// Hash of the container over fields with Resources field zero'd out.
-	HashWithoutResources uint64
 	// Number of times that the container has been restarted.
 	RestartCount int
 	// A string explains why container is in such a status.
@@ -109,7 +109,6 @@ func (m *kubeGenericRuntimeManager) toKubeContainer(c *runtimeapi.Container) (*k
 		ImageRuntimeHandler:  c.Image.RuntimeHandler,
 		Image:                c.Image.Image,
 		Hash:                 annotatedInfo.Hash,
-		HashWithoutResources: annotatedInfo.HashWithoutResources,
 		State:                toKubeContainerState(c.State),
 	}, nil
 }
@@ -628,7 +628,6 @@ func toKubeContainerStatus(status *runtimeapi.ContainerStatus, runtimeName strin
 		ImageRef:             status.ImageRef,
 		ImageRuntimeHandler:  status.Image.RuntimeHandler,
 		Hash:                 annotatedInfo.Hash,
-		HashWithoutResources: annotatedInfo.HashWithoutResources,
 		RestartCount:         annotatedInfo.RestartCount,
 		State:                toKubeContainerState(status.State),
 		CreatedAt:            time.Unix(0, status.CreatedAt),
@@ -518,6 +518,10 @@ func (p podActions) String() string {
 		p.KillPod, p.CreateSandbox, p.UpdatePodResources, p.Attempt, p.InitContainersToStart, p.ContainersToStart, p.EphemeralContainersToStart, p.ContainersToUpdate, p.ContainersToKill)
 }
 
+// containerChanged will determine whether the container has changed based on the fields that will affect the running of the container.
+// Currently, there are only `image` and `name` fields.
+// we don't need to consider the pod UID here, because we find the containerStatus through the pod UID.
+// If the pod UID changes, we will not be able to find the containerStatus to compare against.
 func containerChanged(container *v1.Container, containerStatus *kubecontainer.Status) (uint64, uint64, bool) {
 	expectedHash := kubecontainer.HashContainer(container)
 	return expectedHash, containerStatus.Hash, containerStatus.Hash != expectedHash
@@ -981,10 +985,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *
 			var message string
 			var reason containerKillReason
 			restart := shouldRestartOnFailure(pod)
-			// Do not restart if only the Resources field has changed with InPlacePodVerticalScaling enabled
-			if _, _, changed := containerChanged(&container, containerStatus); changed &&
-				(!isInPlacePodVerticalScalingAllowed(pod) ||
-					kubecontainer.HashContainerWithoutResources(&container) != containerStatus.HashWithoutResources) {
+			if _, _, changed := containerChanged(&container, containerStatus); changed {
 				message = fmt.Sprintf("Container %s definition changed", container.Name)
 				// Restart regardless of the restart policy because the container
 				// spec changed.
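
With only name and image contributing to the hash, the InPlacePodVerticalScaling special case is no longer needed here: resource changes never alter the hash, so there is nothing to exclude with a second HashContainerWithoutResources comparison, and containerChanged alone decides whether the spec change warrants a restart.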
@@ -2436,7 +2436,6 @@ func TestComputePodActionsForPodResize(t *testing.T) {
 		// compute hash
 		if kcs := kps.FindContainerStatusByName(pod.Spec.Containers[idx].Name); kcs != nil {
 			kcs.Hash = kubecontainer.HashContainer(&pod.Spec.Containers[idx])
-			kcs.HashWithoutResources = kubecontainer.HashContainerWithoutResources(&pod.Spec.Containers[idx])
 		}
 	}
 	makeAndSetFakePod(t, m, fakeRuntime, pod)
@@ -2452,7 +2451,6 @@ func TestComputePodActionsForPodResize(t *testing.T) {
 	for idx := range pod.Spec.Containers {
 		if kcs := kps.FindContainerStatusByName(pod.Spec.Containers[idx].Name); kcs != nil {
 			kcs.Hash = kubecontainer.HashContainer(&pod.Spec.Containers[idx])
-			kcs.HashWithoutResources = kubecontainer.HashContainerWithoutResources(&pod.Spec.Containers[idx])
 		}
 	}
 	if test.mutatePodFn != nil {
@@ -22,10 +22,8 @@ import (
 
 	v1 "k8s.io/api/core/v1"
 	kubetypes "k8s.io/apimachinery/pkg/types"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/klog/v2"
 	"k8s.io/kubelet/pkg/types"
-	"k8s.io/kubernetes/pkg/features"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 )
 
@@ -35,7 +33,6 @@ const (
 	podTerminationGracePeriodLabel = "io.kubernetes.pod.terminationGracePeriod"
 
 	containerHashLabel                     = "io.kubernetes.container.hash"
-	containerHashWithoutResourcesLabel     = "io.kubernetes.container.hashWithoutResources"
 	containerRestartCountLabel             = "io.kubernetes.container.restartCount"
 	containerTerminationMessagePathLabel   = "io.kubernetes.container.terminationMessagePath"
 	containerTerminationMessagePolicyLabel = "io.kubernetes.container.terminationMessagePolicy"
@@ -65,7 +62,6 @@ type labeledContainerInfo struct {
 
 type annotatedContainerInfo struct {
 	Hash                      uint64
-	HashWithoutResources      uint64
 	RestartCount              int
 	PodDeletionGracePeriod    *int64
 	PodTerminationGracePeriod *int64
@@ -117,9 +113,6 @@ func newContainerAnnotations(container *v1.Container, pod *v1.Pod, restartCount
 	}
 
 	annotations[containerHashLabel] = strconv.FormatUint(kubecontainer.HashContainer(container), 16)
-	if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
-		annotations[containerHashWithoutResourcesLabel] = strconv.FormatUint(kubecontainer.HashContainerWithoutResources(container), 16)
-	}
 	annotations[containerRestartCountLabel] = strconv.Itoa(restartCount)
 	annotations[containerTerminationMessagePathLabel] = container.TerminationMessagePath
 	annotations[containerTerminationMessagePolicyLabel] = string(container.TerminationMessagePolicy)
@@ -200,11 +193,6 @@ func getContainerInfoFromAnnotations(annotations map[string]string) *annotatedCo
 	if containerInfo.Hash, err = getUint64ValueFromLabel(annotations, containerHashLabel); err != nil {
 		klog.ErrorS(err, "Unable to get label value from annotations", "label", containerHashLabel, "annotations", annotations)
 	}
-	if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
-		if containerInfo.HashWithoutResources, err = getUint64ValueFromLabel(annotations, containerHashWithoutResourcesLabel); err != nil {
-			klog.ErrorS(err, "Unable to get label value from annotations", "label", containerHashWithoutResourcesLabel, "annotations", annotations)
-		}
-	}
 	if containerInfo.RestartCount, err = getIntValueFromLabel(annotations, containerRestartCountLabel); err != nil {
 		klog.ErrorS(err, "Unable to get label value from annotations", "label", containerRestartCountLabel, "annotations", annotations)
 	}
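
As the two label hunks above show, the kubelet now neither writes nor reads the io.kubernetes.container.hashWithoutResources annotation; only io.kubernetes.container.hash is carried on the container.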
@@ -155,7 +155,6 @@ func TestContainerAnnotations(t *testing.T) {
 		PodDeletionGracePeriod:    pod.DeletionGracePeriodSeconds,
 		PodTerminationGracePeriod: pod.Spec.TerminationGracePeriodSeconds,
 		Hash:                      kubecontainer.HashContainer(container),
-		HashWithoutResources:      kubecontainer.HashContainerWithoutResources(container),
 		RestartCount:              restartCount,
 		TerminationMessagePath:    container.TerminationMessagePath,
 		PreStopHandler:            container.Lifecycle.PreStop,
@@ -182,7 +181,6 @@ func TestContainerAnnotations(t *testing.T) {
 	expected.PreStopHandler = nil
 	// Because container is changed, the Hash should be updated
 	expected.Hash = kubecontainer.HashContainer(container)
-	expected.HashWithoutResources = kubecontainer.HashContainerWithoutResources(container)
 	annotations = newContainerAnnotations(container, pod, restartCount, opts)
 	containerInfo = getContainerInfoFromAnnotations(annotations)
 	if !reflect.DeepEqual(containerInfo, expected) {