Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-27 21:47:07 +00:00)
Verify oom_score_adj for containers that have been restarted in pod resize e2e
commit 1208f25b3f
parent ec1b493a08
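
The new e2e check recomputes the oom_score_adj the kubelet should assign and compares it with the live value after a resize-triggered restart. For a burstable pod, the adjustment scales inversely with the container's memory request relative to node memory capacity. The sketch below is a simplified rendering of the burstable branch of kubeqos.GetContainerOOMScoreAdjust (the real function also special-cases guaranteed, best-effort, and node-critical pods), not the upstream code itself:

package main

import "fmt"

// burstableOOMScoreAdjust is a simplified sketch of the burstable-pod branch
// of the kubelet's OOM score policy: containers requesting a larger share of
// node memory get a lower score and are less likely OOM-kill targets.
// Upstream pins guaranteed pods at -997 and best-effort pods at 1000.
func burstableOOMScoreAdjust(memoryRequest, memoryCapacity int64) int {
	adj := 1000 - (1000*memoryRequest)/memoryCapacity
	if adj < 1000-997 { // clamp: stay above the guaranteed band (-997)
		return 1000 - 997
	}
	if adj == 1000 { // clamp: stay strictly below the best-effort value
		return 999
	}
	return int(adj)
}

func main() {
	// e.g. a container requesting 1GiB on an 8GiB node.
	fmt.Println(burstableOOMScoreAdjust(1<<30, 8<<30)) // 875
}

The clamping keeps every burstable container strictly between the guaranteed and best-effort bands, so relative kill priority across QoS classes is preserved.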
@@ -2109,7 +2109,7 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 		if _, exists := resources.Requests[v1.ResourceMemory]; exists {
 			// Get memory requests from actuated resources
 			if actuatedResources, found := kl.allocationManager.GetActuatedResources(pod.UID, allocatedContainer.Name); found {
-				resources.Requests[v1.ResourceMemory] = actuatedResources.Requests.Memory().DeepCopy()
+				resources.Requests[v1.ResourceMemory] = *actuatedResources.Requests.Memory()
 			}
 		}
 	}
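The only functional change in this hunk removes a redundant copy. v1.ResourceList's Memory() accessor resolves through ResourceList.Name, which already returns a pointer to a copy of the map entry, so dereferencing it yields an independent quantity and the chained DeepCopy() copied twice. A minimal sketch of that assumption:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	requests := v1.ResourceList{v1.ResourceMemory: resource.MustParse("128Mi")}

	// Memory() hands back a pointer to a copy of the map value, so the
	// dereferenced quantity is already independent of the ResourceList.
	mem := *requests.Memory()
	mem.Add(resource.MustParse("64Mi"))

	fmt.Println(requests.Memory().String(), mem.String()) // 128Mi 192Mi
}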
@@ -30,6 +30,7 @@ import (
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	helpers "k8s.io/component-helpers/resource"
 	kubecm "k8s.io/kubernetes/pkg/kubelet/cm"
+	kubeqos "k8s.io/kubernetes/pkg/kubelet/qos"
 	"k8s.io/kubernetes/test/e2e/framework"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 
@@ -359,22 +360,22 @@ func VerifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework
 	return utilerrors.NewAggregate(errs)
 }
 
-func verifyPodRestarts(pod *v1.Pod, wantInfo []ResizableContainerInfo) error {
+func verifyPodRestarts(f *framework.Framework, pod *v1.Pod, wantInfo []ResizableContainerInfo) error {
 	ginkgo.GinkgoHelper()
 
 	initCtrStatuses, ctrStatuses := separateContainerStatuses(wantInfo)
 	errs := []error{}
-	if err := verifyContainerRestarts(pod.Status.InitContainerStatuses, initCtrStatuses); err != nil {
+	if err := verifyContainerRestarts(f, pod, pod.Status.InitContainerStatuses, initCtrStatuses); err != nil {
 		errs = append(errs, err)
 	}
-	if err := verifyContainerRestarts(pod.Status.ContainerStatuses, ctrStatuses); err != nil {
+	if err := verifyContainerRestarts(f, pod, pod.Status.ContainerStatuses, ctrStatuses); err != nil {
 		errs = append(errs, err)
 	}
 
 	return utilerrors.NewAggregate(errs)
 }
 
-func verifyContainerRestarts(gotStatuses []v1.ContainerStatus, wantStatuses []v1.ContainerStatus) error {
+func verifyContainerRestarts(f *framework.Framework, pod *v1.Pod, gotStatuses []v1.ContainerStatus, wantStatuses []v1.ContainerStatus) error {
 	ginkgo.GinkgoHelper()
 
 	if len(gotStatuses) != len(wantStatuses) {
@@ -386,11 +387,34 @@ func verifyContainerRestarts(gotStatuses []v1.ContainerStatus, wantStatuses []v1
 	for i, gotStatus := range gotStatuses {
 		if gotStatus.RestartCount != wantStatuses[i].RestartCount {
 			errs = append(errs, fmt.Errorf("unexpected number of restarts for container %s: got %d, want %d", gotStatus.Name, gotStatus.RestartCount, wantStatuses[i].RestartCount))
+		} else if gotStatus.RestartCount > 0 {
+			err := verifyOomScoreAdj(f, pod, gotStatus.Name)
+			if err != nil {
+				errs = append(errs, err)
+			}
 		}
 	}
 	return utilerrors.NewAggregate(errs)
 }
 
+func verifyOomScoreAdj(f *framework.Framework, pod *v1.Pod, containerName string) error {
+	container := FindContainerInPod(pod, containerName)
+	if container == nil {
+		return fmt.Errorf("failed to find container %s in pod %s", containerName, pod.Name)
+	}
+
+	node, err := f.ClientSet.CoreV1().Nodes().Get(context.Background(), pod.Spec.NodeName, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+
+	nodeMemoryCapacity := node.Status.Capacity[v1.ResourceMemory]
+	oomScoreAdj := kubeqos.GetContainerOOMScoreAdjust(pod, container, int64(nodeMemoryCapacity.Value()))
+	expectedOomScoreAdj := strconv.FormatInt(int64(oomScoreAdj), 10)
+
+	return VerifyOomScoreAdjValue(f, pod, container.Name, expectedOomScoreAdj)
+}
+
 func WaitForPodResizeActuation(ctx context.Context, f *framework.Framework, podClient *PodClient, pod *v1.Pod, expectedContainers []ResizableContainerInfo) *v1.Pod {
 	ginkgo.GinkgoHelper()
 	// Wait for resize to complete.
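VerifyOomScoreAdjValue itself is not shown in this diff; conceptually it compares the expected string with what the kernel actually applied inside the container. A hypothetical equivalent, assuming it sits in the same test/e2e/framework/pod package (so the framework's ExecCommandInContainer helper is in scope) and that the container image ships /bin/sh, might look like this:

package pod

import (
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// checkOomScoreAdj is a hypothetical stand-in for VerifyOomScoreAdjValue,
// not the framework's actual code: exec into the container, read the value
// the kernel holds for PID 1, and compare the trimmed string.
func checkOomScoreAdj(f *framework.Framework, pod *v1.Pod, containerName, want string) error {
	got := ExecCommandInContainer(f, pod.Name, containerName,
		"/bin/sh", "-c", "cat /proc/1/oom_score_adj")
	if strings.TrimSpace(got) != want {
		return fmt.Errorf("container %s: oom_score_adj %q, want %q", containerName, got, want)
	}
	return nil
}

Reading /proc/1/oom_score_adj works because the runtime sets the value on the container's init process and child processes inherit it.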
@@ -440,7 +464,7 @@ func ExpectPodResized(ctx context.Context, f *framework.Framework, resizedPod *v
 	if resourceErrs := VerifyPodStatusResources(resizedPod, expectedContainers); resourceErrs != nil {
 		errs = append(errs, fmt.Errorf("container status resources don't match expected: %w", formatErrors(resourceErrs)))
 	}
-	if restartErrs := verifyPodRestarts(resizedPod, expectedContainers); restartErrs != nil {
+	if restartErrs := verifyPodRestarts(f, resizedPod, expectedContainers); restartErrs != nil {
 		errs = append(errs, fmt.Errorf("container restart counts don't match expected: %w", formatErrors(restartErrs)))
 	}
 
@@ -256,6 +256,21 @@ func FindPodConditionByType(podStatus *v1.PodStatus, conditionType v1.PodConditi
 	return nil
 }
 
+// FindContainerInPod finds the v1.Container by its name in the provided pod
+func FindContainerInPod(pod *v1.Pod, containerName string) *v1.Container {
+	for _, container := range pod.Spec.InitContainers {
+		if container.Name == containerName {
+			return &container
+		}
+	}
+	for _, container := range pod.Spec.Containers {
+		if container.Name == containerName {
+			return &container
+		}
+	}
+	return nil
+}
+
 // FindContainerStatusInPod finds a container status by its name in the provided pod
 func FindContainerStatusInPod(pod *v1.Pod, containerName string) *v1.ContainerStatus {
 	for _, containerStatus := range pod.Status.InitContainerStatuses {
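One subtlety in FindContainerInPod: it returns the address of the range variable, which is a pointer to a copy of the slice element, so writes through the result never reach pod.Spec; for this read-only lookup that is fine. A self-contained usage sketch (the helper is duplicated locally so it runs outside the e2e framework):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// findContainerInPod mirrors the helper added above. The returned pointer
// addresses a per-iteration copy of the slice element, not pod.Spec itself.
func findContainerInPod(pod *v1.Pod, name string) *v1.Container {
	for _, container := range pod.Spec.InitContainers {
		if container.Name == name {
			return &container
		}
	}
	for _, container := range pod.Spec.Containers {
		if container.Name == name {
			return &container
		}
	}
	return nil
}

func main() {
	pod := &v1.Pod{Spec: v1.PodSpec{
		InitContainers: []v1.Container{{Name: "init"}},
		Containers:     []v1.Container{{Name: "app"}},
	}}
	fmt.Println(findContainerInPod(pod, "app") != nil) // true
	fmt.Println(findContainerInPod(pod, "missing"))    // <nil>
}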