apply feedback

Anish Shah 2024-11-04 11:39:00 -08:00
parent 4c69bf2496
commit 832d7f7dc2
5 changed files with 49 additions and 33 deletions

View File

@@ -5491,7 +5491,7 @@ func ValidatePodEphemeralContainersUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList {
 func ValidatePodResize(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList {
 	// Part 1: Validate newPod's spec and updates to metadata
 	fldPath := field.NewPath("metadata")
-	allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath)
+	allErrs := ValidateImmutableField(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath)
 	allErrs = append(allErrs, validatePodMetadataAndSpec(newPod, opts)...)
 	allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"), opts)...)

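Note on the validation change: ValidateObjectMetaUpdate applies the normal metadata-update rules, while ValidateImmutableField rejects any metadata difference outright, so the resize subresource no longer permits metadata edits at all. At its core, immutable-field validation is a deep-equality check between old and new; below is a minimal standalone sketch of that idea (not the actual Kubernetes helper, which compares with semantic equality and returns a field.ErrorList):

package main

import (
	"fmt"
	"reflect"
)

// validateImmutable mimics the shape of an "immutable field" check:
// any difference between old and new values is an error.
func validateImmutable(newVal, oldVal any, path string) []string {
	if !reflect.DeepEqual(newVal, oldVal) {
		return []string{fmt.Sprintf("%s: field is immutable", path)}
	}
	return nil
}

func main() {
	oldMeta := map[string]string{"name": "pod-a", "label": "v1"}
	newMeta := map[string]string{"name": "pod-a", "label": "v2"} // label changed

	fmt.Println(validateImmutable(newMeta, oldMeta, "metadata")) // [metadata: field is immutable]
	fmt.Println(validateImmutable(oldMeta, oldMeta, "metadata")) // []
}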
View File

@@ -155,11 +155,15 @@ func (p *podEvaluator) GroupResource() schema.GroupResource {
 // Handles returns true if the evaluator should handle the specified attributes.
 func (p *podEvaluator) Handles(a admission.Attributes) bool {
-	if a.GetSubresource() != "" && a.GetSubresource() != "resize" {
+	op := a.GetOperation()
+	switch a.GetSubresource() {
+	case "":
+		return op == admission.Create
+	case "resize":
+		return op == admission.Update
+	default:
 		return false
 	}
-	op := a.GetOperation()
-	return op == admission.Create || (a.GetSubresource() == "resize" && op == admission.Update)
 }

 // Matches returns true if the evaluator matches the specified quota with the provided input item

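Note on the quota evaluator change: the switch expresses the same decision table as the old if/return pair, but makes each handled (subresource, operation) combination explicit. A standalone sketch of the resulting behavior (admission types mocked up here; the real method takes an admission.Attributes):

package main

import "fmt"

type operation string

const (
	create operation = "CREATE"
	update operation = "UPDATE"
)

// handles mirrors the switch in podEvaluator.Handles: the main resource is
// only evaluated on CREATE, the resize subresource only on UPDATE, and every
// other subresource (status, ephemeralcontainers, ...) is ignored.
func handles(subresource string, op operation) bool {
	switch subresource {
	case "":
		return op == create
	case "resize":
		return op == update
	default:
		return false
	}
}

func main() {
	fmt.Println(handles("", create))       // true:  new pods consume quota
	fmt.Println(handles("", update))       // false: plain updates can't change pod resources
	fmt.Println(handles("resize", update)) // true:  a resize changes resource usage
	fmt.Println(handles("status", update)) // false: other subresources are ignored
}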
View File

@@ -275,19 +275,31 @@ func (podEphemeralContainersStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
 type podResizeStrategy struct {
 	podStrategy
+
+	resetFieldsFilter fieldpath.Filter
 }

 // ResizeStrategy wraps and exports the used podStrategy for the storage package.
-var ResizeStrategy = podResizeStrategy{Strategy}
+var ResizeStrategy = podResizeStrategy{
+	podStrategy: Strategy,
+	resetFieldsFilter: fieldpath.NewIncludeMatcherFilter(
+		fieldpath.MakePrefixMatcherOrDie("spec", "containers", fieldpath.MatchAnyPathElement(), "resources"),
+		fieldpath.MakePrefixMatcherOrDie("spec", "containers", fieldpath.MatchAnyPathElement(), "resizePolicy"),
+	),
+}

-// dropNonPodResizeUpdates discards all changes except for pod.Spec.Containers[*].Resources,ResizePolicy and certain metadata
-func dropNonPodResizeUpdates(newPod, oldPod *api.Pod) *api.Pod {
+// dropNonResizeUpdates discards all changes except for pod.Spec.Containers[*].Resources,ResizePolicy and certain metadata
+func dropNonResizeUpdates(newPod, oldPod *api.Pod) *api.Pod {
 	pod := dropPodUpdates(newPod, oldPod)

+	// Containers are not allowed to be re-ordered, but in case they were,
+	// we don't want to corrupt them here. It will get caught in validation.
 	oldCtrToIndex := make(map[string]int)
 	for idx, ctr := range pod.Spec.Containers {
 		oldCtrToIndex[ctr.Name] = idx
 	}

+	// TODO: Once we add in-place pod resize support for sidecars, we need to allow
+	// modifying sidecar resources via resize subresource too.
 	for _, ctr := range newPod.Spec.Containers {
 		idx, ok := oldCtrToIndex[ctr.Name]
 		if !ok {
@@ -303,7 +315,7 @@ func (podResizeStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
 	newPod := obj.(*api.Pod)
 	oldPod := old.(*api.Pod)
-	*newPod = *dropNonPodResizeUpdates(newPod, oldPod)
+	*newPod = *dropNonResizeUpdates(newPod, oldPod)
 	podutil.MarkPodProposedForResize(oldPod, newPod)
 	podutil.DropDisabledPodFields(newPod, oldPod)
 }
@@ -323,12 +335,9 @@ func (podResizeStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
 // GetResetFieldsFilter returns a set of fields filter reset by the strategy
 // and should not be modified by the user.
-func (podResizeStrategy) GetResetFieldsFilter() map[fieldpath.APIVersion]fieldpath.Filter {
+func (p podResizeStrategy) GetResetFieldsFilter() map[fieldpath.APIVersion]fieldpath.Filter {
 	return map[fieldpath.APIVersion]fieldpath.Filter{
-		"v1": fieldpath.NewIncludeMatcherFilter(
-			fieldpath.MakePrefixMatcherOrDie("spec", "containers", fieldpath.MatchAnyPathElement(), "resources"),
-			fieldpath.MakePrefixMatcherOrDie("spec", "containers", fieldpath.MatchAnyPathElement(), "resizePolicy"),
-		),
+		"v1": p.resetFieldsFilter,
 	}
 }

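Note on the strategy change: the include-matcher filter is now built once when ResizeStrategy is constructed and reused by GetResetFieldsFilter, instead of being rebuilt on every call. The same compute-once pattern in miniature, with the field-path machinery mocked up (only the shape is taken from the real code):

package main

import (
	"fmt"
	"strings"
)

// pathFilter stands in for fieldpath.Filter: it keeps only paths that
// start with one of the allowed prefixes.
type pathFilter struct{ prefixes []string }

func (f pathFilter) keep(path string) bool {
	for _, p := range f.prefixes {
		if strings.HasPrefix(path, p) {
			return true
		}
	}
	return false
}

// resizeStrategy precomputes its filter once, the way podResizeStrategy now
// stores resetFieldsFilter, rather than reassembling the matchers per call.
type resizeStrategy struct{ resetFieldsFilter pathFilter }

var strategy = resizeStrategy{
	resetFieldsFilter: pathFilter{prefixes: []string{
		"spec.containers[*].resources",
		"spec.containers[*].resizePolicy",
	}},
}

func (s resizeStrategy) getResetFieldsFilter() pathFilter { return s.resetFieldsFilter }

func main() {
	f := strategy.getResetFieldsFilter()
	fmt.Println(f.keep("spec.containers[*].resources.limits")) // true: resize may touch this
	fmt.Println(f.keep("spec.nodeName"))                       // false: reset by the strategy
}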
View File

@@ -415,7 +415,8 @@ func (d *DefaultLimitRangerActions) ValidateLimit(limitRange *corev1.LimitRange,
 // SupportsAttributes ignores all calls that do not deal with pod resources or storage requests (PVCs).
 // Also ignores any call that has a subresource defined.
 func (d *DefaultLimitRangerActions) SupportsAttributes(a admission.Attributes) bool {
-	// Handle the special case for in-place pod vertical scaling
+	// Handle in-place vertical scaling of pods, where users modify container
+	// resources using the resize subresource.
 	if a.GetSubresource() == "resize" && a.GetKind().GroupKind() == api.Kind("Pod") && a.GetOperation() == admission.Update {
 		return true
 	}

View File

@@ -40,17 +40,17 @@ import (
 func doPodResizeAdmissionPluginsTests(f *framework.Framework) {
 	testcases := []struct {
 		name                  string
-		enableAdmissionPlugin func(ctx context.Context, f *framework.Framework)
+		enableAdmissionPlugin func(ctx context.Context, f *framework.Framework, ns string)
 		wantMemoryError       string
 		wantCPUError          string
 	}{
 		{
 			name: "pod-resize-resource-quota-test",
-			enableAdmissionPlugin: func(ctx context.Context, f *framework.Framework) {
+			enableAdmissionPlugin: func(ctx context.Context, f *framework.Framework, ns string) {
 				resourceQuota := v1.ResourceQuota{
 					ObjectMeta: metav1.ObjectMeta{
 						Name:      "resize-resource-quota",
-						Namespace: f.Namespace.Name,
+						Namespace: ns,
 					},
 					Spec: v1.ResourceQuotaSpec{
 						Hard: v1.ResourceList{
@@ -61,7 +61,7 @@ func doPodResizeAdmissionPluginsTests(f *framework.Framework) {
 				}
 				ginkgo.By("Creating a ResourceQuota")
-				_, rqErr := f.ClientSet.CoreV1().ResourceQuotas(f.Namespace.Name).Create(ctx, &resourceQuota, metav1.CreateOptions{})
+				_, rqErr := f.ClientSet.CoreV1().ResourceQuotas(ns).Create(ctx, &resourceQuota, metav1.CreateOptions{})
 				framework.ExpectNoError(rqErr, "failed to create resource quota")
 			},
 			wantMemoryError: "exceeded quota: resize-resource-quota, requested: memory=350Mi, used: memory=700Mi, limited: memory=800Mi",
@@ -69,11 +69,11 @@ func doPodResizeAdmissionPluginsTests(f *framework.Framework) {
 		},
 		{
 			name: "pod-resize-limit-ranger-test",
-			enableAdmissionPlugin: func(ctx context.Context, f *framework.Framework) {
+			enableAdmissionPlugin: func(ctx context.Context, f *framework.Framework, ns string) {
 				lr := v1.LimitRange{
 					ObjectMeta: metav1.ObjectMeta{
 						Name:      "resize-limit-ranger",
-						Namespace: f.Namespace.Name,
+						Namespace: ns,
 					},
 					Spec: v1.LimitRangeSpec{
 						Limits: []v1.LimitRangeItem{
@@ -101,7 +101,7 @@ func doPodResizeAdmissionPluginsTests(f *framework.Framework) {
 				}
 				ginkgo.By("Creating a LimitRanger")
-				_, lrErr := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(ctx, &lr, metav1.CreateOptions{})
+				_, lrErr := f.ClientSet.CoreV1().LimitRanges(ns).Create(ctx, &lr, metav1.CreateOptions{})
 				framework.ExpectNoError(lrErr, "failed to create limit ranger")
 			},
 			wantMemoryError: "forbidden: maximum memory usage per Container is 500Mi, but limit is 750Mi",
@@ -111,6 +111,9 @@ func doPodResizeAdmissionPluginsTests(f *framework.Framework) {
 	for _, tc := range testcases {
 		ginkgo.It(tc.name, func(ctx context.Context) {
+			ns, err := f.CreateNamespace(ctx, tc.name, nil)
+			framework.ExpectNoError(err, "failed creating Namespace")
+
 			containers := []e2epod.ResizableContainerInfo{
 				{
 					Name: "c1",
@@ -133,26 +136,25 @@ func doPodResizeAdmissionPluginsTests(f *framework.Framework) {
 				{"name":"c1", "resources":{"requests":{"cpu":"250m","memory":"750Mi"},"limits":{"cpu":"250m","memory":"750Mi"}}}
 			]}}`

-			tc.enableAdmissionPlugin(ctx, f)
+			tc.enableAdmissionPlugin(ctx, f, ns.Name)
 			tStamp := strconv.Itoa(time.Now().Nanosecond())
 			e2epod.InitDefaultResizePolicy(containers)
 			e2epod.InitDefaultResizePolicy(expected)
-			testPod1 := e2epod.MakePodWithResizableContainers(f.Namespace.Name, "testpod1", tStamp, containers)
+			testPod1 := e2epod.MakePodWithResizableContainers(ns.Name, "testpod1", tStamp, containers)
 			testPod1 = e2epod.MustMixinRestrictedPodSecurity(testPod1)
-			testPod2 := e2epod.MakePodWithResizableContainers(f.Namespace.Name, "testpod2", tStamp, containers)
+			testPod2 := e2epod.MakePodWithResizableContainers(ns.Name, "testpod2", tStamp, containers)
 			testPod2 = e2epod.MustMixinRestrictedPodSecurity(testPod2)

 			ginkgo.By("creating pods")
 			podClient := e2epod.NewPodClient(f)
-			newPod1 := podClient.CreateSync(ctx, testPod1)
-			newPod2 := podClient.CreateSync(ctx, testPod2)
+			newPods := podClient.CreateBatch(ctx, []*v1.Pod{testPod1, testPod2})

 			ginkgo.By("verifying initial pod resources, and policy are as expected")
-			e2epod.VerifyPodResources(newPod1, containers)
+			e2epod.VerifyPodResources(newPods[0], containers)

 			ginkgo.By("patching pod for resize within resource quota")
-			patchedPod, pErr := f.ClientSet.CoreV1().Pods(newPod1.Namespace).Patch(ctx, newPod1.Name,
+			patchedPod, pErr := f.ClientSet.CoreV1().Pods(newPods[0].Namespace).Patch(ctx, newPods[0].Name,
 				types.StrategicMergePatchType, []byte(patchString), metav1.PatchOptions{}, "resize")
 			framework.ExpectNoError(pErr, "failed to patch pod for resize")
@@ -160,7 +162,7 @@ func doPodResizeAdmissionPluginsTests(f *framework.Framework) {
 			e2epod.VerifyPodResources(patchedPod, expected)

 			ginkgo.By("waiting for resize to be actuated")
-			resizedPod := e2epod.WaitForPodResizeActuation(ctx, f, podClient, newPod1)
+			resizedPod := e2epod.WaitForPodResizeActuation(ctx, f, podClient, newPods[0])
 			e2epod.ExpectPodResized(ctx, f, resizedPod, expected)

 			ginkgo.By("verifying pod resources after resize")
@@ -189,10 +191,10 @@ func doPodResizeAdmissionPluginsTests(f *framework.Framework) {
 			framework.ExpectNoError(e2epod.VerifyPodStatusResources(patchedPodExceedMemory, expected))

 			ginkgo.By("deleting pods")
-			delErr1 := e2epod.DeletePodWithWait(ctx, f.ClientSet, newPod1)
-			framework.ExpectNoError(delErr1, "failed to delete pod %s", newPod1.Name)
-			delErr2 := e2epod.DeletePodWithWait(ctx, f.ClientSet, newPod2)
-			framework.ExpectNoError(delErr2, "failed to delete pod %s", newPod2.Name)
+			delErr1 := e2epod.DeletePodWithWait(ctx, f.ClientSet, newPods[0])
+			framework.ExpectNoError(delErr1, "failed to delete pod %s", newPods[0].Name)
+			delErr2 := e2epod.DeletePodWithWait(ctx, f.ClientSet, newPods[1])
+			framework.ExpectNoError(delErr2, "failed to delete pod %s", newPods[1].Name)
 		})
 	}
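Note on the e2e change: each test case now provisions its own namespace via f.CreateNamespace and threads it through enableAdmissionPlugin, so one case's ResourceQuota or LimitRange cannot leak into another; the two pods are also created through a single CreateBatch call instead of two sequential CreateSync calls. A rough standalone sketch of that batch-create pattern (pod and client types mocked here; the real helper is e2epod.PodClient.CreateBatch):

package main

import (
	"context"
	"fmt"
	"sync"
)

type pod struct{ name string }

// createSync stands in for podClient.CreateSync: create one pod and block
// until it is running (the real code calls the API server and polls).
func createSync(ctx context.Context, p *pod) *pod {
	return p
}

// createBatch mirrors the idea behind CreateBatch: create and wait for
// several pods concurrently instead of one after another, returning the
// results in input order.
func createBatch(ctx context.Context, pods []*pod) []*pod {
	out := make([]*pod, len(pods))
	var wg sync.WaitGroup
	for i, p := range pods {
		wg.Add(1)
		go func(i int, p *pod) {
			defer wg.Done()
			out[i] = createSync(ctx, p)
		}(i, p)
	}
	wg.Wait()
	return out
}

func main() {
	pods := createBatch(context.Background(), []*pod{{"testpod1"}, {"testpod2"}})
	fmt.Println(pods[0].name, pods[1].name) // testpod1 testpod2
}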