From a482d8937b6bbf5b60c98cb0fea7b52093bc2292 Mon Sep 17 00:00:00 2001
From: Itamar Holder
Date: Sun, 7 Apr 2024 16:24:26 +0300
Subject: [PATCH 1/9] Add extended resources to ContainerStatuses[i].Resources

Signed-off-by: Itamar Holder
---
 pkg/kubelet/kubelet_pods.go | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go
index 6867a4d4a1f..54fbca6849b 100644
--- a/pkg/kubelet/kubelet_pods.go
+++ b/pkg/kubelet/kubelet_pods.go
@@ -2110,6 +2110,15 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 			if ephemeralStorage, found := container.Resources.Limits[v1.ResourceEphemeralStorage]; found {
 				limits[v1.ResourceEphemeralStorage] = ephemeralStorage.DeepCopy()
 			}
+
+			for extendedResourceName, extendedResourceQuantity := range container.Resources.Limits {
+				if extendedResourceName == v1.ResourceCPU || extendedResourceName == v1.ResourceMemory ||
+					extendedResourceName == v1.ResourceStorage || extendedResourceName == v1.ResourceEphemeralStorage {
+					continue
+				}
+
+				limits[extendedResourceName] = extendedResourceQuantity.DeepCopy()
+			}
 		}
 		// Convert Requests
 		if status.AllocatedResources != nil {
@@ -2125,6 +2134,15 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 			if ephemeralStorage, found := status.AllocatedResources[v1.ResourceEphemeralStorage]; found {
 				requests[v1.ResourceEphemeralStorage] = ephemeralStorage.DeepCopy()
 			}
+
+			for extendedResourceName, extendedResourceQuantity := range status.AllocatedResources {
+				if extendedResourceName == v1.ResourceCPU || extendedResourceName == v1.ResourceMemory ||
+					extendedResourceName == v1.ResourceStorage || extendedResourceName == v1.ResourceEphemeralStorage {
+					continue
+				}
+
+				requests[extendedResourceName] = extendedResourceQuantity.DeepCopy()
+			}
 		}
 		//TODO(vinaykul,derekwaynecarr,InPlacePodVerticalScaling): Update this to include extended resources in
 		// addition to CPU, memory, ephemeral storage. Add test case for extended resources.
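The rule these hunks implement: any key in the container's limits or allocated requests other than the four first-class names (cpu, memory, storage, ephemeral-storage) is treated as an extended resource and is deep-copied into the reported status, so the status never aliases quantities from the spec. A self-contained sketch of that filtering rule follows; the helper name and the example.com/gpu resource are illustrative and not taken from the patch:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// copyExtendedResources applies the same filter as the hunks above: every
// resource name that is not one of the four first-class names passes
// through, and each quantity is deep-copied into the output list.
func copyExtendedResources(in, out v1.ResourceList) {
	for name, quantity := range in {
		if name == v1.ResourceCPU || name == v1.ResourceMemory ||
			name == v1.ResourceStorage || name == v1.ResourceEphemeralStorage {
			continue
		}
		out[name] = quantity.DeepCopy()
	}
}

func main() {
	limits := v1.ResourceList{
		v1.ResourceCPU:                     resource.MustParse("500m"),
		v1.ResourceName("example.com/gpu"): resource.MustParse("1"),
	}
	statusLimits := v1.ResourceList{}
	copyExtendedResources(limits, statusLimits)

	// Only example.com/gpu survives the filter; cpu is skipped.
	for name, q := range statusLimits {
		fmt.Printf("%s=%s\n", name, q.String())
	}
}
```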
From 47207f9aad2d8b763f441eca3a8dff27afa89010 Mon Sep 17 00:00:00 2001
From: Itamar Holder
Date: Mon, 8 Apr 2024 09:37:53 +0300
Subject: [PATCH 2/9] unit test: Add extended resources to
 ContainerStatuses[i].Resources

Signed-off-by: Itamar Holder
---
 pkg/kubelet/kubelet_pods_test.go | 83 ++++++++++++++++++++++++++++++++
 1 file changed, 83 insertions(+)

diff --git a/pkg/kubelet/kubelet_pods_test.go b/pkg/kubelet/kubelet_pods_test.go
index f3019e247bf..e44822d0ac3 100644
--- a/pkg/kubelet/kubelet_pods_test.go
+++ b/pkg/kubelet/kubelet_pods_test.go
@@ -4588,6 +4588,20 @@ func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
 	CPU2AndMem2GAndStorage2G := CPU2AndMem2G.DeepCopy()
 	CPU2AndMem2GAndStorage2G[v1.ResourceEphemeralStorage] = resource.MustParse("2Gi")
 
+	addExtendedResource := func(list v1.ResourceList) v1.ResourceList {
+		const stubCustomResource = v1.ResourceName("dummy.io/dummy")
+
+		withExtendedResource := list.DeepCopy()
+		for _, resourceName := range []v1.ResourceName{v1.ResourceMemory, v1.ResourceCPU} {
+			if _, exists := withExtendedResource[resourceName]; !exists {
+				withExtendedResource[resourceName] = resource.MustParse("0")
+			}
+		}
+
+		withExtendedResource[stubCustomResource] = resource.MustParse("1")
+		return withExtendedResource
+	}
+
 	testKubelet := newTestKubelet(t, false)
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
@@ -4734,6 +4748,75 @@ func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
 				},
 			},
 		},
+		"BestEffort QoSPod with extended resources": {
+			Resources: []v1.ResourceRequirements{{Requests: addExtendedResource(v1.ResourceList{})}},
+			OldStatus: []v1.ContainerStatus{
+				{
+					Name:      testContainerName,
+					Image:     "img",
+					ImageID:   "img1234",
+					State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
+					Resources: &v1.ResourceRequirements{},
+				},
+			},
+			Expected: []v1.ContainerStatus{
+				{
+					Name:               testContainerName,
+					ContainerID:        testContainerID.String(),
+					Image:              "img",
+					ImageID:            "img1234",
+					State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
+					AllocatedResources: addExtendedResource(v1.ResourceList{}),
+					Resources:          &v1.ResourceRequirements{Requests: addExtendedResource(v1.ResourceList{})},
+				},
+			},
+		},
+		"BurstableQoSPod with extended resources": {
+			Resources: []v1.ResourceRequirements{{Requests: addExtendedResource(CPU1AndMem1G)}},
+			OldStatus: []v1.ContainerStatus{
+				{
+					Name:      testContainerName,
+					Image:     "img",
+					ImageID:   "img1234",
+					State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
+					Resources: &v1.ResourceRequirements{},
+				},
+			},
+			Expected: []v1.ContainerStatus{
+				{
+					Name:               testContainerName,
+					ContainerID:        testContainerID.String(),
+					Image:              "img",
+					ImageID:            "img1234",
+					State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
+					AllocatedResources: addExtendedResource(CPU1AndMem1G),
+					Resources:          &v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1G)},
+				},
+			},
+		},
+		"GuaranteedQoSPod with extended resources": {
+			Resources: []v1.ResourceRequirements{{Requests: addExtendedResource(CPU1AndMem1G), Limits: addExtendedResource(CPU1AndMem1G)}},
+			OldStatus: []v1.ContainerStatus{
+				{
+					Name:      testContainerName,
+					Image:     "img",
+					ImageID:   "img1234",
+					State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
+					Resources: &v1.ResourceRequirements{},
+				},
+			},
+			Expected: []v1.ContainerStatus{
+				{
+					Name:               testContainerName,
+					ContainerID:        testContainerID.String(),
+					Image:              "img",
+					ImageID:            "img1234",
+					State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
+					AllocatedResources: addExtendedResource(CPU1AndMem1G),
+					Resources:          &v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1G), Limits: addExtendedResource(CPU1AndMem1G)},
+				},
+			},
+		},
 	} {
 		tPod := testPod.DeepCopy()
 		tPod.Name = fmt.Sprintf("%s-%d", testPod.Name, idx)

From b9109ab7e4efb4e4b04344ac7a5ad0cb0a7f6ace Mon Sep 17 00:00:00 2001
From: Itamar Holder
Date: Mon, 8 Apr 2024 09:42:16 +0300
Subject: [PATCH 3/9] Refactor: avoid code duplication when converting custom
 resources

Signed-off-by: Itamar Holder
---
 pkg/kubelet/kubelet_pods.go | 30 ++++++++++++++----------------
 1 file changed, 14 insertions(+), 16 deletions(-)

diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go
index 54fbca6849b..e25fed5eba6 100644
--- a/pkg/kubelet/kubelet_pods.go
+++ b/pkg/kubelet/kubelet_pods.go
@@ -2094,6 +2094,18 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 		if oldStatus.Resources == nil {
 			oldStatus.Resources = &v1.ResourceRequirements{}
 		}
+
+		convertCustomResources := func(inResources, outResources v1.ResourceList) {
+			for extendedResourceName, extendedResourceQuantity := range inResources {
+				if extendedResourceName == v1.ResourceCPU || extendedResourceName == v1.ResourceMemory ||
+					extendedResourceName == v1.ResourceStorage || extendedResourceName == v1.ResourceEphemeralStorage {
+					continue
+				}
+
+				outResources[extendedResourceName] = extendedResourceQuantity.DeepCopy()
+			}
+		}
+
 		// Convert Limits
 		if container.Resources.Limits != nil {
 			limits = make(v1.ResourceList)
@@ -2111,14 +2123,7 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 			if ephemeralStorage, found := container.Resources.Limits[v1.ResourceEphemeralStorage]; found {
 				limits[v1.ResourceEphemeralStorage] = ephemeralStorage.DeepCopy()
 			}
-			for extendedResourceName, extendedResourceQuantity := range container.Resources.Limits {
-				if extendedResourceName == v1.ResourceCPU || extendedResourceName == v1.ResourceMemory ||
-					extendedResourceName == v1.ResourceStorage || extendedResourceName == v1.ResourceEphemeralStorage {
-					continue
-				}
-
-				limits[extendedResourceName] = extendedResourceQuantity.DeepCopy()
-			}
+			convertCustomResources(container.Resources.Limits, limits)
 		}
 		// Convert Requests
 		if status.AllocatedResources != nil {
@@ -2135,14 +2140,7 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 			if ephemeralStorage, found := status.AllocatedResources[v1.ResourceEphemeralStorage]; found {
 				requests[v1.ResourceEphemeralStorage] = ephemeralStorage.DeepCopy()
 			}
-			for extendedResourceName, extendedResourceQuantity := range status.AllocatedResources {
-				if extendedResourceName == v1.ResourceCPU || extendedResourceName == v1.ResourceMemory ||
-					extendedResourceName == v1.ResourceStorage || extendedResourceName == v1.ResourceEphemeralStorage {
-					continue
-				}
-
-				requests[extendedResourceName] = extendedResourceQuantity.DeepCopy()
-			}
+			convertCustomResources(status.AllocatedResources, requests)
 		}
 		//TODO(vinaykul,derekwaynecarr,InPlacePodVerticalScaling): Update this to include extended resources in
 		// addition to CPU, memory, ephemeral storage. Add test case for extended resources.
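The refactor folds both copy loops into the single convertCustomResources closure, so its contract is easy to pin down in isolation: standard resource names are dropped, everything else is deep-copied. A minimal unit-test sketch of that contract; the package clause and test name are hypothetical, and the closure body is the one from the hunk above:

```go
package kubelet

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func TestConvertCustomResourcesFiltersStandardNames(t *testing.T) {
	// Same body as the convertCustomResources closure introduced above.
	convertCustomResources := func(inResources, outResources v1.ResourceList) {
		for name, quantity := range inResources {
			if name == v1.ResourceCPU || name == v1.ResourceMemory ||
				name == v1.ResourceStorage || name == v1.ResourceEphemeralStorage {
				continue
			}
			outResources[name] = quantity.DeepCopy()
		}
	}

	in := v1.ResourceList{
		v1.ResourceCPU:                    resource.MustParse("1"),
		v1.ResourceMemory:                 resource.MustParse("1Gi"),
		v1.ResourceName("dummy.io/dummy"): resource.MustParse("2"),
	}
	out := v1.ResourceList{}
	convertCustomResources(in, out)

	if len(out) != 1 {
		t.Fatalf("expected exactly one converted resource, got %d", len(out))
	}
	if got := out[v1.ResourceName("dummy.io/dummy")]; got.Cmp(resource.MustParse("2")) != 0 {
		t.Errorf("extended resource was not copied, got %s", got.String())
	}
}
```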
From 8811817e7ba642f2afa7f47e57b26a7a54969463 Mon Sep 17 00:00:00 2001
From: Itamar Holder
Date: Thu, 18 Apr 2024 20:04:48 +0300
Subject: [PATCH 4/9] Refactor: improve convertCustomResources() variable
 names

Signed-off-by: Itamar Holder
---
 pkg/kubelet/kubelet_pods.go | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go
index e25fed5eba6..e808bd2dc8e 100644
--- a/pkg/kubelet/kubelet_pods.go
+++ b/pkg/kubelet/kubelet_pods.go
@@ -2096,13 +2096,13 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 		}
 
 		convertCustomResources := func(inResources, outResources v1.ResourceList) {
-			for extendedResourceName, extendedResourceQuantity := range inResources {
-				if extendedResourceName == v1.ResourceCPU || extendedResourceName == v1.ResourceMemory ||
-					extendedResourceName == v1.ResourceStorage || extendedResourceName == v1.ResourceEphemeralStorage {
+			for resourceName, resourceQuantity := range inResources {
+				if resourceName == v1.ResourceCPU || resourceName == v1.ResourceMemory ||
+					resourceName == v1.ResourceStorage || resourceName == v1.ResourceEphemeralStorage {
 					continue
 				}
 
-				outResources[extendedResourceName] = extendedResourceQuantity.DeepCopy()
+				outResources[resourceName] = resourceQuantity.DeepCopy()
 			}
 		}
 

From 090961f8c85555952ff4bdc2f31df4cbe7a1863e Mon Sep 17 00:00:00 2001
From: Itamar Holder
Date: Wed, 28 Aug 2024 15:34:41 +0300
Subject: [PATCH 5/9] Add limits and request processing for storage resources

Signed-off-by: Itamar Holder
---
 pkg/kubelet/kubelet_pods.go | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go
index e808bd2dc8e..bbd382328d9 100644
--- a/pkg/kubelet/kubelet_pods.go
+++ b/pkg/kubelet/kubelet_pods.go
@@ -2122,6 +2122,9 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 			if ephemeralStorage, found := container.Resources.Limits[v1.ResourceEphemeralStorage]; found {
 				limits[v1.ResourceEphemeralStorage] = ephemeralStorage.DeepCopy()
 			}
+			if storage, found := container.Resources.Limits[v1.ResourceStorage]; found {
+				limits[v1.ResourceStorage] = storage.DeepCopy()
+			}
 
 			convertCustomResources(container.Resources.Limits, limits)
 		}
@@ -2139,6 +2142,9 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 			if ephemeralStorage, found := status.AllocatedResources[v1.ResourceEphemeralStorage]; found {
 				requests[v1.ResourceEphemeralStorage] = ephemeralStorage.DeepCopy()
 			}
+			if storage, found := status.AllocatedResources[v1.ResourceStorage]; found {
+				requests[v1.ResourceStorage] = storage.DeepCopy()
+			}
 
 			convertCustomResources(status.AllocatedResources, requests)
 		}

From 772cdfc28c441a5f81aac2c6701ad7c82ce6bd5a Mon Sep 17 00:00:00 2001
From: Itamar Holder
Date: Wed, 28 Aug 2024 16:27:04 +0300
Subject: [PATCH 6/9] unit test: Add limits and request processing for storage
 resources

Signed-off-by: Itamar Holder
---
 pkg/kubelet/kubelet_pods_test.go | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/pkg/kubelet/kubelet_pods_test.go b/pkg/kubelet/kubelet_pods_test.go
index e44822d0ac3..264d93c9306 100644
--- a/pkg/kubelet/kubelet_pods_test.go
+++ b/pkg/kubelet/kubelet_pods_test.go
@@ -4585,8 +4585,10 @@ func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
 	CPU2AndMem2G := v1.ResourceList{v1.ResourceCPU: resource.MustParse("2"), v1.ResourceMemory: resource.MustParse("2Gi")}
 	CPU1AndMem1GAndStorage2G := CPU1AndMem1G.DeepCopy()
 	CPU1AndMem1GAndStorage2G[v1.ResourceEphemeralStorage] = resource.MustParse("2Gi")
+	CPU1AndMem1GAndStorage2G[v1.ResourceStorage] = resource.MustParse("2Gi")
 	CPU2AndMem2GAndStorage2G := CPU2AndMem2G.DeepCopy()
 	CPU2AndMem2GAndStorage2G[v1.ResourceEphemeralStorage] = resource.MustParse("2Gi")
+	CPU2AndMem2GAndStorage2G[v1.ResourceStorage] = resource.MustParse("2Gi")
 
 	addExtendedResource := func(list v1.ResourceList) v1.ResourceList {
 		const stubCustomResource = v1.ResourceName("dummy.io/dummy")
@@ -4794,6 +4796,29 @@ func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
 				},
 			},
 		},
+		"BurstableQoSPod with storage, ephemeral storage and extended resources": {
+			Resources: []v1.ResourceRequirements{{Requests: addExtendedResource(CPU1AndMem1GAndStorage2G)}},
+			OldStatus: []v1.ContainerStatus{
+				{
+					Name:      testContainerName,
+					Image:     "img",
+					ImageID:   "img1234",
+					State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
+					Resources: &v1.ResourceRequirements{},
+				},
+			},
+			Expected: []v1.ContainerStatus{
+				{
+					Name:               testContainerName,
+					ContainerID:        testContainerID.String(),
+					Image:              "img",
+					ImageID:            "img1234",
+					State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
+					AllocatedResources: addExtendedResource(CPU1AndMem1GAndStorage2G),
+					Resources:          &v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1GAndStorage2G)},
+				},
+			},
+		},
 		"GuaranteedQoSPod with extended resources": {
 			Resources: []v1.ResourceRequirements{{Requests: addExtendedResource(CPU1AndMem1G), Limits: addExtendedResource(CPU1AndMem1G)}},
 			OldStatus: []v1.ContainerStatus{

From 9545d45c5dde717db2af8a1c166d8ebeacb1bd15 Mon Sep 17 00:00:00 2001
From: Itamar Holder
Date: Mon, 8 Apr 2024 09:42:43 +0300
Subject: [PATCH 7/9] Remove TODO regarding extended resources

Signed-off-by: Itamar Holder
---
 pkg/kubelet/kubelet_pods.go | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go
index bbd382328d9..ff872985ce5 100644
--- a/pkg/kubelet/kubelet_pods.go
+++ b/pkg/kubelet/kubelet_pods.go
@@ -2148,8 +2148,7 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 			convertCustomResources(status.AllocatedResources, requests)
 		}
-		//TODO(vinaykul,derekwaynecarr,InPlacePodVerticalScaling): Update this to include extended resources in
-		// addition to CPU, memory, ephemeral storage. Add test case for extended resources.
+
 		resources := &v1.ResourceRequirements{
 			Limits:   limits,
 			Requests: requests,

From f43f5bc821890d915d75d04d0fe009a8cca4a271 Mon Sep 17 00:00:00 2001
From: Itamar Holder
Date: Wed, 24 Apr 2024 16:29:45 +0300
Subject: [PATCH 8/9] Add an e2e test

Signed-off-by: Itamar Holder
---
 test/e2e/node/pod_resize.go | 142 ++++++++++++++++++++++++++++++++++--
 1 file changed, 134 insertions(+), 8 deletions(-)

diff --git a/test/e2e/node/pod_resize.go b/test/e2e/node/pod_resize.go
index 96824ae5ff7..03167d8a51f 100644
--- a/test/e2e/node/pod_resize.go
+++ b/test/e2e/node/pod_resize.go
@@ -31,6 +31,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	clientset "k8s.io/client-go/kubernetes"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	resourceapi "k8s.io/kubernetes/pkg/api/v1/resource"
@@ -63,14 +64,16 @@ const (
 
 	PollInterval time.Duration = 2 * time.Second
 	PollTimeout  time.Duration = 4 * time.Minute
+
+	fakeExtendedResource = "dummy.com/dummy"
 )
 
 type ContainerResources struct {
-	CPUReq, CPULim, MemReq, MemLim, EphStorReq, EphStorLim string
+	CPUReq, CPULim, MemReq, MemLim, EphStorReq, EphStorLim, ExtendedResourceReq, ExtendedResourceLim string
 }
 
 type ContainerAllocations struct {
-	CPUAlloc, MemAlloc, ephStorAlloc string
+	CPUAlloc, MemAlloc, ephStorAlloc, ExtendedResourceAlloc string
 }
 
 type TestContainerInfo struct {
@@ -146,6 +149,9 @@ func getTestResourceInfo(tcInfo TestContainerInfo) (v1.ResourceRequirements, v1.
 		if tcInfo.Resources.EphStorLim != "" {
 			lim[v1.ResourceEphemeralStorage] = resource.MustParse(tcInfo.Resources.EphStorLim)
 		}
+		if tcInfo.Resources.ExtendedResourceLim != "" {
+			lim[fakeExtendedResource] = resource.MustParse(tcInfo.Resources.ExtendedResourceLim)
+		}
 		if tcInfo.Resources.CPUReq != "" {
 			req[v1.ResourceCPU] = resource.MustParse(tcInfo.Resources.CPUReq)
 		}
@@ -155,6 +161,9 @@ func getTestResourceInfo(tcInfo TestContainerInfo) (v1.ResourceRequirements, v1.
 		if tcInfo.Resources.EphStorReq != "" {
 			req[v1.ResourceEphemeralStorage] = resource.MustParse(tcInfo.Resources.EphStorReq)
 		}
+		if tcInfo.Resources.ExtendedResourceReq != "" {
+			req[fakeExtendedResource] = resource.MustParse(tcInfo.Resources.ExtendedResourceReq)
+		}
 		res = v1.ResourceRequirements{Limits: lim, Requests: req}
 	}
 	if tcInfo.Allocations != nil {
@@ -168,7 +177,9 @@
 		if tcInfo.Allocations.ephStorAlloc != "" {
 			alloc[v1.ResourceEphemeralStorage] = resource.MustParse(tcInfo.Allocations.ephStorAlloc)
 		}
-
+		if tcInfo.Allocations.ExtendedResourceAlloc != "" {
+			alloc[fakeExtendedResource] = resource.MustParse(tcInfo.Allocations.ExtendedResourceAlloc)
+		}
 	}
 	if tcInfo.CPUPolicy != nil {
 		cpuPol := v1.ContainerResizePolicy{ResourceName: v1.ResourceCPU, RestartPolicy: *tcInfo.CPUPolicy}
@@ -318,7 +329,8 @@ func verifyPodAllocations(pod *v1.Pod, tcInfo []TestContainerInfo, flagError boo
 		cStatus := cStatusMap[ci.Name]
 		if ci.Allocations == nil {
 			if ci.Resources != nil {
-				alloc := &ContainerAllocations{CPUAlloc: ci.Resources.CPUReq, MemAlloc: ci.Resources.MemReq}
+				alloc := &ContainerAllocations{CPUAlloc: ci.Resources.CPUReq, MemAlloc: ci.Resources.MemReq,
+					ExtendedResourceAlloc: ci.Resources.ExtendedResourceReq}
 				ci.Allocations = alloc
 				defer func() {
 					ci.Allocations = nil
@@ -571,18 +583,92 @@ func genPatchString(containers []TestContainerInfo) (string, error) {
 	return string(patchBytes), nil
 }
 
+func patchNode(ctx context.Context, client clientset.Interface, old *v1.Node, new *v1.Node) error {
+	oldData, err := json.Marshal(old)
+	if err != nil {
+		return err
+	}
+
+	newData, err := json.Marshal(new)
+	if err != nil {
+		return err
+	}
+	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Node{})
+	if err != nil {
+		return fmt.Errorf("failed to create merge patch for node %q: %w", old.Name, err)
+	}
+	_, err = client.CoreV1().Nodes().Patch(ctx, old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
+	return err
+}
+
+func addExtendedResource(clientSet clientset.Interface, nodeName, extendedResourceName string, extendedResourceQuantity resource.Quantity) {
+	extendedResource := v1.ResourceName(extendedResourceName)
+
+	ginkgo.By("Adding a custom resource")
+	originalNode, err := clientSet.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
+	framework.ExpectNoError(err)
+
+	node := originalNode.DeepCopy()
+	node.Status.Capacity[extendedResource] = extendedResourceQuantity
+	node.Status.Allocatable[extendedResource] = extendedResourceQuantity
+	err = patchNode(context.Background(), clientSet, originalNode.DeepCopy(), node)
+	framework.ExpectNoError(err)
+
+	gomega.Eventually(func() error {
+		node, err = clientSet.CoreV1().Nodes().Get(context.Background(), node.Name, metav1.GetOptions{})
+		framework.ExpectNoError(err)
+
+		fakeResourceCapacity, exists := node.Status.Capacity[extendedResource]
+		if !exists {
+			return fmt.Errorf("node %s has no %s resource capacity", node.Name, extendedResourceName)
+		}
+		if fakeResourceCapacity.Cmp(extendedResourceQuantity) != 0 {
+			return fmt.Errorf("node %s has resource capacity %s, expected: %s", node.Name, fakeResourceCapacity.String(), extendedResourceQuantity.String())
+		}
+
+		return nil
+	}).WithTimeout(30 * time.Second).WithPolling(time.Second).ShouldNot(gomega.HaveOccurred())
+}
+
+func removeExtendedResource(clientSet clientset.Interface, nodeName, extendedResourceName string) {
+	extendedResource := v1.ResourceName(extendedResourceName)
+
+	ginkgo.By("Removing a custom resource")
+	originalNode, err := clientSet.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
+	framework.ExpectNoError(err)
+
+	node := originalNode.DeepCopy()
+	delete(node.Status.Capacity, extendedResource)
+	delete(node.Status.Allocatable, extendedResource)
+	err = patchNode(context.Background(), clientSet, originalNode.DeepCopy(), node)
+	framework.ExpectNoError(err)
+
+	gomega.Eventually(func() error {
+		node, err = clientSet.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
+		framework.ExpectNoError(err)
+
+		if _, exists := node.Status.Capacity[extendedResource]; exists {
+			return fmt.Errorf("node %s has resource capacity %s which is expected to be removed", node.Name, extendedResourceName)
+		}
+
+		return nil
+	}).WithTimeout(30 * time.Second).WithPolling(time.Second).ShouldNot(gomega.HaveOccurred())
+}
+
 func doPodResizeTests() {
 	f := framework.NewDefaultFramework("pod-resize")
 	var podClient *e2epod.PodClient
+
 	ginkgo.BeforeEach(func() {
 		podClient = e2epod.NewPodClient(f)
 	})
 
 	type testCase struct {
-		name        string
-		containers  []TestContainerInfo
-		patchString string
-		expected    []TestContainerInfo
+		name                string
+		containers          []TestContainerInfo
+		patchString         string
+		expected            []TestContainerInfo
+		addExtendedResource bool
 	}
 
 	noRestart := v1.NotRequired
@@ -1284,6 +1370,31 @@
 			},
 		},
 	},
+		{
+			name: "Guaranteed QoS pod, one container - increase CPU & memory with an extended resource",
+			containers: []TestContainerInfo{
+				{
+					Name: "c1",
+					Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi",
+						ExtendedResourceReq: "1", ExtendedResourceLim: "1"},
+					CPUPolicy: &noRestart,
+					MemPolicy: &noRestart,
+				},
+			},
+			patchString: `{"spec":{"containers":[
+				{"name":"c1", "resources":{"requests":{"cpu":"200m","memory":"400Mi"},"limits":{"cpu":"200m","memory":"400Mi"}}}
+			]}}`,
+			expected: []TestContainerInfo{
+				{
+					Name: "c1",
+					Resources: &ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "400Mi", MemLim: "400Mi",
+						ExtendedResourceReq: "1", ExtendedResourceLim: "1"},
+					CPUPolicy: &noRestart,
+					MemPolicy: &noRestart,
+				},
+			},
+			addExtendedResource: true,
+		},
 	}
 
 	for idx := range tests {
 		tc := tests[idx]
 		ginkgo.It(tc.name, func(ctx context.Context) {
 			var testPod, patchedPod *v1.Pod
 			var pErr error
+			var nodeName string
 
 			tStamp := strconv.Itoa(time.Now().Nanosecond())
 			initDefaultResizePolicy(tc.containers)
 			initDefaultResizePolicy(tc.expected)
 			testPod = makeTestPod(f.Namespace.Name, "testpod", tStamp, tc.containers)
 
+			if tc.addExtendedResource {
+				nodes, err := e2enode.GetReadySchedulableNodes(context.Background(), f.ClientSet)
+				framework.ExpectNoError(err)
+
+				nodeName = nodes.Items[0].Name
+
+				addExtendedResource(f.ClientSet, nodeName, fakeExtendedResource, resource.MustParse("123"))
+				testPod.Spec.NodeName = nodeName
+			}
+
 			ginkgo.By("creating pod")
 			newPod := podClient.CreateSync(ctx, testPod)
 
 			ginkgo.By("deleting pod")
 			err = e2epod.DeletePodWithWait(ctx, f.ClientSet, newPod)
 			framework.ExpectNoError(err, "failed to delete pod")
+
+			if tc.addExtendedResource {
+				removeExtendedResource(f.ClientSet, nodeName, fakeExtendedResource)
+			}
 		})
 	}
 }

From 85068bce13bd3df3522dc53bd29e86e5f66eae0e Mon Sep 17 00:00:00 2001
From: Itamar Holder
Date: Sun, 5 May 2024 09:54:36 +0300
Subject: [PATCH 9/9] Patch all nodes with extended resource to allow testing
 on every node

Signed-off-by: Itamar Holder
---
 test/e2e/node/pod_resize.go | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/test/e2e/node/pod_resize.go b/test/e2e/node/pod_resize.go
index 03167d8a51f..464981e3019 100644
--- a/test/e2e/node/pod_resize.go
+++ b/test/e2e/node/pod_resize.go
@@ -1402,7 +1402,6 @@ func doPodResizeTests() {
 		ginkgo.It(tc.name, func(ctx context.Context) {
 			var testPod, patchedPod *v1.Pod
 			var pErr error
-			var nodeName string
 
 			tStamp := strconv.Itoa(time.Now().Nanosecond())
 			initDefaultResizePolicy(tc.containers)
@@ -1413,10 +1412,14 @@ func doPodResizeTests() {
 				nodes, err := e2enode.GetReadySchedulableNodes(context.Background(), f.ClientSet)
 				framework.ExpectNoError(err)
 
-				nodeName = nodes.Items[0].Name
-
-				addExtendedResource(f.ClientSet, nodeName, fakeExtendedResource, resource.MustParse("123"))
-				testPod.Spec.NodeName = nodeName
+				for _, node := range nodes.Items {
+					addExtendedResource(f.ClientSet, node.Name, fakeExtendedResource, resource.MustParse("123"))
+				}
+				defer func() {
+					for _, node := range nodes.Items {
+						removeExtendedResource(f.ClientSet, node.Name, fakeExtendedResource)
+					}
+				}()
 			}
 
 			ginkgo.By("creating pod")
@@ -1480,10 +1483,6 @@ func doPodResizeTests() {
 			ginkgo.By("deleting pod")
 			err = e2epod.DeletePodWithWait(ctx, f.ClientSet, newPod)
 			framework.ExpectNoError(err, "failed to delete pod")
-
-			if tc.addExtendedResource {
-				removeExtendedResource(f.ClientSet, nodeName, fakeExtendedResource)
-			}
 		})
 	}
 }
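The e2e helpers lean on patchNode, which sends only the delta between the original and the modified Node so nothing but the extended-resource keys is touched on the API server. The shape of the patch it produces can be seen in a self-contained sketch of the same strategicpatch call; the program is illustrative, with dummy.com/dummy matching the fakeExtendedResource constant above:

```go
package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	oldNode := &v1.Node{}
	oldNode.Status.Capacity = v1.ResourceList{v1.ResourceCPU: resource.MustParse("8")}

	newNode := oldNode.DeepCopy()
	newNode.Status.Capacity["dummy.com/dummy"] = resource.MustParse("123")

	oldData, err := json.Marshal(oldNode)
	if err != nil {
		panic(err)
	}
	newData, err := json.Marshal(newNode)
	if err != nil {
		panic(err)
	}

	// The two-way merge patch contains only the fields that differ between
	// the two serialized objects, so the PATCH request stays minimal.
	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Node{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patchBytes))
	// Prints something like: {"status":{"capacity":{"dummy.com/dummy":"123"}}}
}
```

The real helpers also mirror the quantity into Status.Allocatable and poll with gomega.Eventually until the API server reflects the change, since node status updates are not observed synchronously by readers.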