Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-08 03:33:56 +00:00)
Don't allow memory limit decrease unless resize policy is RestartContainer
parent 9f2629123f
commit a1595d9dca
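In short: after this change, a pod resize request may only lower (or newly add) a container's memory limit if that container declares a RestartContainer resize policy for memory. A minimal sketch of a container spec that opts in, using the public k8s.io/api/core/v1 types rather than the internal core package edited below; the image and values are illustrative, not taken from this commit:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    // A container whose memory limit may be resized downward, because its
    // memory resize policy tells the kubelet to restart the container when
    // the limit changes. With NotRequired (or no memory policy at all), the
    // validation in this commit rejects lowering or adding a memory limit.
    ctr := v1.Container{
        Name:  "app",
        Image: "registry.k8s.io/pause:3.9", // illustrative image
        Resources: v1.ResourceRequirements{
            Limits: v1.ResourceList{
                v1.ResourceMemory: resource.MustParse("200Mi"),
            },
        },
        ResizePolicy: []v1.ContainerResizePolicy{{
            ResourceName:  v1.ResourceMemory,
            RestartPolicy: v1.RestartContainer,
        }},
    }
    fmt.Printf("%s resize policy: %+v\n", ctr.Name, ctr.ResizePolicy)
}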
@@ -335,3 +335,20 @@ func SetResizeStatus(resizeStatus api.PodResizeStatus) TweakPodStatus {
         podstatus.Resize = resizeStatus
     }
 }
+
+// TweakContainers applies the container tweaks to all containers in the pod with a masked type.
+// Note: this should typically be added to pod tweaks after all containers have been added.
+func TweakContainers(tweaks ...TweakContainer) Tweak {
+    return func(pod *api.Pod) {
+        for i := range pod.Spec.InitContainers {
+            for _, tweak := range tweaks {
+                tweak(&pod.Spec.InitContainers[i])
+            }
+        }
+        for i := range pod.Spec.Containers {
+            for _, tweak := range tweaks {
+                tweak(&pod.Spec.Containers[i])
+            }
+        }
+    }
+}
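The new TweakContainers helper exists so a single container tweak can be applied to every container of a test pod, init and regular alike. A small sketch of how it composes with the other podtest helpers, assuming podtest resolves to the pod testing package edited in this hunk (the import paths are my assumption, they are not shown in the diff):

package example

import (
    podtest "k8s.io/kubernetes/pkg/api/pod/testing" // assumed location of the package in this hunk
    core "k8s.io/kubernetes/pkg/apis/core"
)

// resizePolicyForAll builds a pod tweak that stamps the given resize policy
// onto every container (init and regular) of a test pod, mirroring the
// resizePolicy helper added to the tests later in this commit.
func resizePolicyForAll(resource core.ResourceName, policy core.ResourceResizeRestartPolicy) podtest.Tweak {
    return podtest.TweakContainers(podtest.SetContainerResizePolicy(
        core.ContainerResizePolicy{
            ResourceName:  resource,
            RestartPolicy: policy,
        }))
}

// Example: a test pod whose containers all require a restart for memory resizes.
var examplePod = podtest.MakePod("pod", resizePolicyForAll(core.ResourceMemory, core.RestartContainer))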
@@ -5622,21 +5622,19 @@ func ValidatePodResize(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
             if !isRestartableInitContainer(&ctr) {
                 continue
             }
-            if resourcesRemoved(newPod.Spec.InitContainers[ix].Resources.Requests, ctr.Resources.Requests) {
-                allErrs = append(allErrs, field.Forbidden(specPath.Child("initContainers").Index(ix).Child("resources").Child("requests"), "resource requests cannot be removed"))
-            }
-            if resourcesRemoved(newPod.Spec.InitContainers[ix].Resources.Limits, ctr.Resources.Limits) {
-                allErrs = append(allErrs, field.Forbidden(specPath.Child("initContainers").Index(ix).Child("resources").Child("limits"), "resource limits cannot be removed"))
-            }
+            allErrs = append(allErrs, validateContainerResize(
+                &newPod.Spec.InitContainers[ix].Resources,
+                &oldPod.Spec.InitContainers[ix].Resources,
+                newPod.Spec.InitContainers[ix].ResizePolicy,
+                specPath.Child("initContainers").Index(ix).Child("resources"))...)
         }
     }
-    for ix, ctr := range oldPod.Spec.Containers {
-        if resourcesRemoved(newPod.Spec.Containers[ix].Resources.Requests, ctr.Resources.Requests) {
-            allErrs = append(allErrs, field.Forbidden(specPath.Child("containers").Index(ix).Child("resources").Child("requests"), "resource requests cannot be removed"))
-        }
-        if resourcesRemoved(newPod.Spec.Containers[ix].Resources.Limits, ctr.Resources.Limits) {
-            allErrs = append(allErrs, field.Forbidden(specPath.Child("containers").Index(ix).Child("resources").Child("limits"), "resource limits cannot be removed"))
-        }
+    for ix := range oldPod.Spec.Containers {
+        allErrs = append(allErrs, validateContainerResize(
+            &newPod.Spec.Containers[ix].Resources,
+            &oldPod.Spec.Containers[ix].Resources,
+            newPod.Spec.Containers[ix].ResizePolicy,
+            specPath.Child("containers").Index(ix).Child("resources"))...)
     }

     // Ensure that only CPU and memory resources are mutable for regular containers.
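Note that the init-container loop above is gated on isRestartableInitContainer, so the per-container resize validation only runs for sidecar-style init containers. A one-function re-statement of that predicate against the public v1 API, as an assumption about what the (unshown) helper checks:

package example

import (
    v1 "k8s.io/api/core/v1"
)

// isSidecar is a hypothetical stand-in for the repo's isRestartableInitContainer:
// an init container participates in resize validation only when it is a sidecar,
// meaning its container-level restart policy is Always.
func isSidecar(c *v1.Container) bool {
    return c.RestartPolicy != nil && *c.RestartPolicy == v1.ContainerRestartPolicyAlways
}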
@@ -5732,6 +5730,49 @@ func isPodResizeRequestSupported(pod core.Pod) bool {
     return true
 }

+// validateContainerResize validates the changes to the container's resource requirements for a pod resize request.
+// newRequirements and oldRequirements must be non-nil.
+func validateContainerResize(newRequirements, oldRequirements *core.ResourceRequirements, resizePolicies []core.ContainerResizePolicy, fldPath *field.Path) field.ErrorList {
+    var allErrs field.ErrorList
+
+    // Removing resource requirements is not supported.
+    if resourcesRemoved(newRequirements.Requests, oldRequirements.Requests) {
+        allErrs = append(allErrs, field.Forbidden(fldPath.Child("requests"), "resource requests cannot be removed"))
+    }
+    if resourcesRemoved(newRequirements.Limits, oldRequirements.Limits) {
+        allErrs = append(allErrs, field.Forbidden(fldPath.Child("limits"), "resource limits cannot be removed"))
+    }
+
+    // Special case: memory limits may not be decreased if resize policy is NotRequired.
+    var memRestartPolicy core.ResourceResizeRestartPolicy
+    for _, policy := range resizePolicies {
+        if policy.ResourceName == core.ResourceMemory {
+            memRestartPolicy = policy.RestartPolicy
+            break
+        }
+    }
+    if memRestartPolicy == core.NotRequired || memRestartPolicy == "" {
+        newLimit, hasNewLimit := newRequirements.Limits[core.ResourceMemory]
+        oldLimit, hasOldLimit := oldRequirements.Limits[core.ResourceMemory]
+        if hasNewLimit && hasOldLimit {
+            if newLimit.Cmp(oldLimit) < 0 {
+                allErrs = append(allErrs, field.Forbidden(
+                    fldPath.Child("limits").Key(core.ResourceMemory.String()),
+                    fmt.Sprintf("memory limits cannot be decreased unless resizePolicy is %s", core.RestartContainer)))
+            }
+        } else if hasNewLimit && !hasOldLimit {
+            // Adding a memory limit is implicitly decreasing the memory limit (from 'max')
+            allErrs = append(allErrs, field.Forbidden(
+                fldPath.Child("limits").Key(core.ResourceMemory.String()),
+                fmt.Sprintf("memory limits cannot be added unless resizePolicy is %s", core.RestartContainer)))
+        }
+    }
+
+    // TODO(tallclair): Move resizable resource checks here.
+
+    return allErrs
+}
+
 func resourcesRemoved(resourceList, oldResourceList core.ResourceList) bool {
     if len(oldResourceList) > len(resourceList) {
         return true
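Stated outside the diff, the memory rule added above boils down to a small predicate. A self-contained re-statement using the public k8s.io/api types, for illustration only; the repo's validateContainerResize additionally rejects removed requests and limits, and returns field.Forbidden errors rather than a bool:

package example

import (
    v1 "k8s.io/api/core/v1"
)

// memoryLimitChangeAllowed is a hypothetical mirror of the memory check above:
// lowering an existing memory limit, or introducing one where none existed,
// is only permitted when the container's memory resize policy requires a restart.
func memoryLimitChangeAllowed(oldRes, newRes v1.ResourceRequirements, policies []v1.ContainerResizePolicy) bool {
    var memPolicy v1.ResourceResizeRestartPolicy
    for _, p := range policies {
        if p.ResourceName == v1.ResourceMemory {
            memPolicy = p.RestartPolicy
            break
        }
    }
    if memPolicy != v1.NotRequired && memPolicy != "" {
        return true // e.g. RestartContainer: the restart applies the new limit safely
    }
    oldLimit, hadLimit := oldRes.Limits[v1.ResourceMemory]
    newLimit, hasLimit := newRes.Limits[v1.ResourceMemory]
    switch {
    case hasLimit && hadLimit:
        return newLimit.Cmp(oldLimit) >= 0 // no in-place decrease
    case hasLimit && !hadLimit:
        return false // adding a limit is an implicit decrease from "max"
    default:
        return true
    }
}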
@@ -25607,7 +25607,7 @@ func TestValidateSELinuxChangePolicy(t *testing.T) {

 func TestValidatePodResize(t *testing.T) {
     mkPod := func(req, lim core.ResourceList, tweaks ...podtest.Tweak) *core.Pod {
-        return podtest.MakePod("pod", append(tweaks,
+        allTweaks := []podtest.Tweak{
             podtest.SetContainers(
                 podtest.MakeContainer(
                     "container",
@@ -25619,11 +25619,14 @@ func TestValidatePodResize(t *testing.T) {
                     ),
                 ),
             ),
-        )...)
+        }
+        // Prepend the SetContainers call so TweakContainers can be used.
+        allTweaks = append(allTweaks, tweaks...)
+        return podtest.MakePod("pod", allTweaks...)
     }

     mkPodWithInitContainers := func(req, lim core.ResourceList, restartPolicy core.ContainerRestartPolicy, tweaks ...podtest.Tweak) *core.Pod {
-        return podtest.MakePod("pod", append(tweaks,
+        allTweaks := []podtest.Tweak{
             podtest.SetInitContainers(
                 podtest.MakeContainer(
                     "container",
@@ -25636,7 +25639,18 @@ func TestValidatePodResize(t *testing.T) {
                     podtest.SetContainerRestartPolicy(restartPolicy),
                 ),
             ),
-        )...)
+        }
+        // Prepend the SetInitContainers call so TweakContainers can be used.
+        allTweaks = append(allTweaks, tweaks...)
+        return podtest.MakePod("pod", allTweaks...)
     }

+    resizePolicy := func(resource core.ResourceName, policy core.ResourceResizeRestartPolicy) podtest.Tweak {
+        return podtest.TweakContainers(podtest.SetContainerResizePolicy(
+            core.ContainerResizePolicy{
+                ResourceName:  resource,
+                RestartPolicy: policy,
+            }))
+    }
+
     tests := []struct {
@@ -25667,14 +25681,14 @@ func TestValidatePodResize(t *testing.T) {
         new: podtest.MakePod("pod",
             podtest.SetContainers(podtest.MakeContainer("container",
                 podtest.SetContainerResources(core.ResourceRequirements{
-                    Limits: getResources("100m", "100Mi", "", ""),
+                    Limits: getResources("100m", "200Mi", "", ""),
                 }))),
             podtest.SetPodResources(&core.ResourceRequirements{Limits: getResources("100m", "200Mi", "", "")}),
         ),
         old: podtest.MakePod("pod",
             podtest.SetContainers(podtest.MakeContainer("container",
                 podtest.SetContainerResources(core.ResourceRequirements{
-                    Limits: getResources("100m", "200Mi", "", ""),
+                    Limits: getResources("100m", "100Mi", "", ""),
                 }))),
             podtest.SetPodResources(&core.ResourceRequirements{Limits: getResources("100m", "200Mi", "", "")}),
         ),
@@ -25793,9 +25807,24 @@ func TestValidatePodResize(t *testing.T) {
         new: mkPod(core.ResourceList{}, getResources("200m", "0", "1Gi", "")),
         err: "",
     }, {
-        test: "memory limit change",
+        test: "memory limit increase",
         old: mkPod(core.ResourceList{}, getResources("100m", "100Mi", "", "")),
         new: mkPod(core.ResourceList{}, getResources("100m", "200Mi", "", "")),
         err: "",
     }, {
+        test: "no restart policy: memory limit decrease",
+        old: mkPod(core.ResourceList{}, getResources("100m", "200Mi", "", "")),
+        new: mkPod(core.ResourceList{}, getResources("100m", "100Mi", "", "")),
+        err: "memory limits cannot be decreased",
+    }, {
+        test: "restart NotRequired: memory limit decrease",
+        old: mkPod(core.ResourceList{}, getResources("100m", "200Mi", "", ""), resizePolicy("memory", core.NotRequired)),
+        new: mkPod(core.ResourceList{}, getResources("100m", "100Mi", "", ""), resizePolicy("memory", core.NotRequired)),
+        err: "memory limits cannot be decreased",
+    }, {
+        test: "RestartContainer: memory limit decrease",
+        old: mkPod(core.ResourceList{}, getResources("100m", "200Mi", "", ""), resizePolicy("memory", core.RestartContainer)),
+        new: mkPod(core.ResourceList{}, getResources("100m", "100Mi", "", ""), resizePolicy("memory", core.RestartContainer)),
+        err: "",
+    }, {
         test: "storage limit change",
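The err strings in these cases are matched as substrings of the aggregated field.Error messages (see the strings.Contains check at the bottom of the test). A small sketch of the error shape produced for a rejected memory-limit decrease, using the k8s.io/apimachinery field package; the exact path shown is illustrative:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
    // Roughly how the Forbidden error above renders for the first regular container.
    path := field.NewPath("spec", "containers").Index(0).Child("resources", "limits").Key("memory")
    err := field.Forbidden(path, "memory limits cannot be decreased unless resizePolicy is RestartContainer")
    fmt.Println(err.Error())
    // Expected output (approximately):
    // spec.containers[0].resources.limits[memory]: Forbidden: memory limits cannot be decreased unless resizePolicy is RestartContainer
}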
@@ -25824,13 +25853,13 @@ func TestValidatePodResize(t *testing.T) {
         err: "",
     }, {
         test: "Pod QoS unchanged, burstable -> burstable",
-        old: mkPod(getResources("200m", "200Mi", "1Gi", ""), getResources("400m", "400Mi", "2Gi", "")),
-        new: mkPod(getResources("100m", "100Mi", "1Gi", ""), getResources("200m", "200Mi", "2Gi", "")),
+        old: mkPod(getResources("200m", "100Mi", "1Gi", ""), getResources("400m", "200Mi", "2Gi", "")),
+        new: mkPod(getResources("100m", "200Mi", "1Gi", ""), getResources("200m", "400Mi", "2Gi", "")),
         err: "",
     }, {
         test: "Pod QoS unchanged, burstable -> burstable, add limits",
         old: mkPod(getResources("100m", "100Mi", "", ""), core.ResourceList{}),
-        new: mkPod(getResources("100m", "100Mi", "", ""), getResources("200m", "200Mi", "", "")),
+        new: mkPod(getResources("100m", "100Mi", "", ""), getResources("200m", "", "", "")),
         err: "",
     }, {
         test: "Pod QoS unchanged, burstable -> burstable, add requests",
@@ -26005,9 +26034,24 @@ func TestValidatePodResize(t *testing.T) {
         new: mkPodWithInitContainers(core.ResourceList{}, getResources("200m", "0", "1Gi", ""), core.ContainerRestartPolicyAlways),
         err: "",
     }, {
-        test: "memory limit change for sidecar containers",
+        test: "memory limit increase for sidecar containers",
         old: mkPodWithInitContainers(core.ResourceList{}, getResources("100m", "100Mi", "", ""), core.ContainerRestartPolicyAlways),
         new: mkPodWithInitContainers(core.ResourceList{}, getResources("100m", "200Mi", "", ""), core.ContainerRestartPolicyAlways),
         err: "",
     }, {
+        test: "memory limit decrease for sidecar containers, no resize policy",
+        old: mkPodWithInitContainers(core.ResourceList{}, getResources("100m", "200Mi", "", ""), core.ContainerRestartPolicyAlways),
+        new: mkPodWithInitContainers(core.ResourceList{}, getResources("100m", "100Mi", "", ""), core.ContainerRestartPolicyAlways),
+        err: "memory limits cannot be decreased",
+    }, {
+        test: "memory limit decrease for sidecar containers, resize policy NotRequired",
+        old: mkPodWithInitContainers(core.ResourceList{}, getResources("100m", "200Mi", "", ""), core.ContainerRestartPolicyAlways, resizePolicy(core.ResourceMemory, core.NotRequired)),
+        new: mkPodWithInitContainers(core.ResourceList{}, getResources("100m", "100Mi", "", ""), core.ContainerRestartPolicyAlways, resizePolicy(core.ResourceMemory, core.NotRequired)),
+        err: "memory limits cannot be decreased",
+    }, {
+        test: "memory limit decrease for sidecar containers, resize policy RestartContainer",
+        old: mkPodWithInitContainers(core.ResourceList{}, getResources("100m", "200Mi", "", ""), core.ContainerRestartPolicyAlways, resizePolicy(core.ResourceMemory, core.RestartContainer)),
+        new: mkPodWithInitContainers(core.ResourceList{}, getResources("100m", "100Mi", "", ""), core.ContainerRestartPolicyAlways, resizePolicy(core.ResourceMemory, core.RestartContainer)),
+        err: "",
+    }, {
         test: "storage limit change for sidecar containers",
@@ -26033,6 +26077,7 @@ func TestValidatePodResize(t *testing.T) {
     }

     for _, test := range tests {
+        t.Run(test.test, func(t *testing.T) {
         test.new.ObjectMeta.ResourceVersion = "1"
         test.old.ObjectMeta.ResourceVersion = "1"

@@ -26058,17 +26103,18 @@ func TestValidatePodResize(t *testing.T) {
             test.old.Spec.RestartPolicy = "Always"
         }

-        errs := ValidatePodResize(test.new, test.old, PodValidationOptions{})
+        errs := ValidatePodResize(test.new, test.old, PodValidationOptions{AllowSidecarResizePolicy: true})
         if test.err == "" {
             if len(errs) != 0 {
-                t.Errorf("unexpected invalid: %s (%+v)\nA: %+v\nB: %+v", test.test, errs, test.new, test.old)
+                t.Errorf("unexpected invalid: (%+v)\nA: %+v\nB: %+v", errs, test.new, test.old)
             }
         } else {
             if len(errs) == 0 {
-                t.Errorf("unexpected valid: %s\nA: %+v\nB: %+v", test.test, test.new, test.old)
+                t.Errorf("unexpected valid:\nA: %+v\nB: %+v", test.new, test.old)
             } else if actualErr := errs.ToAggregate().Error(); !strings.Contains(actualErr, test.err) {
-                t.Errorf("unexpected error message: %s\nExpected error: %s\nActual error: %s", test.test, test.err, actualErr)
+                t.Errorf("unexpected error message:\nExpected error: %s\nActual error: %s", test.err, actualErr)
             }
         }
+        })
     }
 }