test: parity between cluster and node IPPR e2e tests

Some IPPR (in-place pod resize) cluster e2e tests are missing from the node e2e tests; this change brings the two suites to parity.

parent d67e6545b1
commit 6203006348
@@ -54,6 +54,8 @@ const (
     Cgroupv2CPURequest string = "/sys/fs/cgroup/cpu.weight"
     CPUPeriod string = "100000"
     MinContainerRuntimeVersion string = "1.6.9"
+
+    fakeExtendedResource = "dummy.com/dummy"
 )
 
 var (
@@ -70,12 +72,15 @@ type ContainerResources struct {
     MemLim string
     EphStorReq string
     EphStorLim string
+    ExtendedResourceReq string
+    ExtendedResourceLim string
 }
 
 type ContainerAllocations struct {
     CPUAlloc string
     MemAlloc string
     ephStorAlloc string
+    ExtendedResourceAlloc string
 }
 
 type TestContainerInfo struct {
@@ -87,6 +92,28 @@ type TestContainerInfo struct {
     RestartCount int32
 }
 
+type containerPatch struct {
+    Name string `json:"name"`
+    Resources struct {
+        Requests struct {
+            CPU string `json:"cpu,omitempty"`
+            Memory string `json:"memory,omitempty"`
+            EphStor string `json:"ephemeral-storage,omitempty"`
+        } `json:"requests"`
+        Limits struct {
+            CPU string `json:"cpu,omitempty"`
+            Memory string `json:"memory,omitempty"`
+            EphStor string `json:"ephemeral-storage,omitempty"`
+        } `json:"limits"`
+    } `json:"resources"`
+}
+
+type patchSpec struct {
+    Spec struct {
+        Containers []containerPatch `json:"containers"`
+    } `json:"spec"`
+}
+
 func supportsInPlacePodVerticalScaling(ctx context.Context, f *framework.Framework) bool {
     node := getLocalNode(ctx, f)
     re := regexp.MustCompile("containerd://(.*)")
@@ -418,6 +445,100 @@ func waitForPodResizeActuation(ctx context.Context, f *framework.Framework, c cl
     return resizedPod
 }
 
+func genPatchString(containers []TestContainerInfo) (string, error) {
+    var patch patchSpec
+
+    for _, container := range containers {
+        var cPatch containerPatch
+        cPatch.Name = container.Name
+        cPatch.Resources.Requests.CPU = container.Resources.CPUReq
+        cPatch.Resources.Requests.Memory = container.Resources.MemReq
+        cPatch.Resources.Limits.CPU = container.Resources.CPULim
+        cPatch.Resources.Limits.Memory = container.Resources.MemLim
+
+        patch.Spec.Containers = append(patch.Spec.Containers, cPatch)
+    }
+
+    patchBytes, err := json.Marshal(patch)
+    if err != nil {
+        return "", err
+    }
+
+    return string(patchBytes), nil
+}
+
+func patchNode(ctx context.Context, client clientset.Interface, old *v1.Node, new *v1.Node) error {
+    oldData, err := json.Marshal(old)
+    if err != nil {
+        return err
+    }
+
+    newData, err := json.Marshal(new)
+    if err != nil {
+        return err
+    }
+    patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Node{})
+    if err != nil {
+        return fmt.Errorf("failed to create merge patch for node %q: %w", old.Name, err)
+    }
+    _, err = client.CoreV1().Nodes().Patch(ctx, old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
+    return err
+}
+
+func addExtendedResource(clientSet clientset.Interface, nodeName, extendedResourceName string, extendedResourceQuantity resource.Quantity) {
+    extendedResource := v1.ResourceName(extendedResourceName)
+
+    ginkgo.By("Adding a custom resource")
+    OriginalNode, err := clientSet.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
+    framework.ExpectNoError(err)
+
+    node := OriginalNode.DeepCopy()
+    node.Status.Capacity[extendedResource] = extendedResourceQuantity
+    node.Status.Allocatable[extendedResource] = extendedResourceQuantity
+    err = patchNode(context.Background(), clientSet, OriginalNode.DeepCopy(), node)
+    framework.ExpectNoError(err)
+
+    gomega.Eventually(func() error {
+        node, err = clientSet.CoreV1().Nodes().Get(context.Background(), node.Name, metav1.GetOptions{})
+        framework.ExpectNoError(err)
+
+        fakeResourceCapacity, exists := node.Status.Capacity[extendedResource]
+        if !exists {
+            return fmt.Errorf("node %s has no %s resource capacity", node.Name, extendedResourceName)
+        }
+        if expectedResource := resource.MustParse("123"); fakeResourceCapacity.Cmp(expectedResource) != 0 {
+            return fmt.Errorf("node %s has resource capacity %s, expected: %s", node.Name, fakeResourceCapacity.String(), expectedResource.String())
+        }
+
+        return nil
+    }).WithTimeout(30 * time.Second).WithPolling(time.Second).ShouldNot(gomega.HaveOccurred())
+}
+
+func removeExtendedResource(clientSet clientset.Interface, nodeName, extendedResourceName string) {
+    extendedResource := v1.ResourceName(extendedResourceName)
+
+    ginkgo.By("Removing a custom resource")
+    originalNode, err := clientSet.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
+    framework.ExpectNoError(err)
+
+    node := originalNode.DeepCopy()
+    delete(node.Status.Capacity, extendedResource)
+    delete(node.Status.Allocatable, extendedResource)
+    err = patchNode(context.Background(), clientSet, originalNode.DeepCopy(), node)
+    framework.ExpectNoError(err)
+
+    gomega.Eventually(func() error {
+        node, err = clientSet.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
+        framework.ExpectNoError(err)
+
+        if _, exists := node.Status.Capacity[extendedResource]; exists {
+            return fmt.Errorf("node %s has resource capacity %s which is expected to be removed", node.Name, extendedResourceName)
+        }
+
+        return nil
+    }).WithTimeout(30 * time.Second).WithPolling(time.Second).ShouldNot(gomega.HaveOccurred())
+}
+
 func doPodResizeTests() {
     f := framework.NewDefaultFramework("pod-resize-test")
     var podClient *e2epod.PodClient
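Note: genPatchString above carries only cpu and memory through the containerPatch/patchSpec helpers, which is sufficient for the rollback step added later in this change, since the resize patches in these tests modify only cpu and memory. A standalone sketch (illustrative only, not part of this commit) that mirrors trimmed copies of those helper types and prints the patch produced for one container:

// Standalone sketch: trimmed copies of containerPatch/patchSpec, used here only to
// show the strategic-merge payload that genPatchString builds.
package main

import (
    "encoding/json"
    "fmt"
)

type containerPatch struct {
    Name      string `json:"name"`
    Resources struct {
        Requests struct {
            CPU    string `json:"cpu,omitempty"`
            Memory string `json:"memory,omitempty"`
        } `json:"requests"`
        Limits struct {
            CPU    string `json:"cpu,omitempty"`
            Memory string `json:"memory,omitempty"`
        } `json:"limits"`
    } `json:"resources"`
}

type patchSpec struct {
    Spec struct {
        Containers []containerPatch `json:"containers"`
    } `json:"spec"`
}

func main() {
    // Populate one container the same way genPatchString does.
    var c containerPatch
    c.Name = "c1"
    c.Resources.Requests.CPU = "100m"
    c.Resources.Requests.Memory = "200Mi"
    c.Resources.Limits.CPU = "100m"
    c.Resources.Limits.Memory = "200Mi"

    var p patchSpec
    p.Spec.Containers = append(p.Spec.Containers, c)

    b, err := json.Marshal(p)
    if err != nil {
        panic(err)
    }
    // Prints roughly:
    // {"spec":{"containers":[{"name":"c1","resources":{"requests":{"cpu":"100m","memory":"200Mi"},"limits":{"cpu":"100m","memory":"200Mi"}}}]}}
    fmt.Println(string(b))
}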
@@ -430,6 +551,7 @@ func doPodResizeTests() {
         containers []TestContainerInfo
         patchString string
         expected []TestContainerInfo
+        addExtendedResource bool
     }
 
     noRestart := v1.NotRequired
@@ -1131,6 +1253,31 @@ func doPodResizeTests() {
                 },
             },
         },
+        {
+            name: "Guaranteed QoS pod, one container - increase CPU & memory with an extended resource",
+            containers: []TestContainerInfo{
+                {
+                    Name: "c1",
+                    Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi",
+                        ExtendedResourceReq: "1", ExtendedResourceLim: "1"},
+                    CPUPolicy: &noRestart,
+                    MemPolicy: &noRestart,
+                },
+            },
+            patchString: `{"spec":{"containers":[
+                        {"name":"c1", "resources":{"requests":{"cpu":"200m","memory":"400Mi"},"limits":{"cpu":"200m","memory":"400Mi"}}}
+                    ]}}`,
+            expected: []TestContainerInfo{
+                {
+                    Name: "c1",
+                    Resources: &ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "400Mi", MemLim: "400Mi",
+                        ExtendedResourceReq: "1", ExtendedResourceLim: "1"},
+                    CPUPolicy: &noRestart,
+                    MemPolicy: &noRestart,
+                },
+            },
+            addExtendedResource: true,
+        },
     }
 
     timeouts := framework.NewTimeoutContext()
@@ -1153,6 +1300,20 @@ func doPodResizeTests() {
             testPod = makeTestPod(f.Namespace.Name, "testpod", tStamp, tc.containers)
             testPod = e2epod.MustMixinRestrictedPodSecurity(testPod)
 
+            if tc.addExtendedResource {
+                nodes, err := e2enode.GetReadySchedulableNodes(context.Background(), f.ClientSet)
+                framework.ExpectNoError(err)
+
+                for _, node := range nodes.Items {
+                    addExtendedResource(f.ClientSet, node.Name, fakeExtendedResource, resource.MustParse("123"))
+                }
+                defer func() {
+                    for _, node := range nodes.Items {
+                        removeExtendedResource(f.ClientSet, node.Name, fakeExtendedResource)
+                    }
+                }()
+            }
+
             ginkgo.By("creating pod")
             newPod := podClient.CreateSync(ctx, testPod)
 
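Note: the addExtendedResource call wired in above goes through patchNode, which computes a two-way strategic-merge patch between the original and the modified node and applies it to the node's status subresource. A standalone sketch (illustrative only, not part of this commit) of that computation for the dummy.com/dummy=123 value used here:

// Standalone sketch: reproduces the patch computation patchNode performs before
// PATCHing the node "status" subresource; the printed body is approximate.
package main

import (
    "encoding/json"
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    "k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
    original := &v1.Node{}
    modified := original.DeepCopy()
    modified.Status.Capacity = v1.ResourceList{"dummy.com/dummy": resource.MustParse("123")}
    modified.Status.Allocatable = v1.ResourceList{"dummy.com/dummy": resource.MustParse("123")}

    oldData, err := json.Marshal(original)
    if err != nil {
        panic(err)
    }
    newData, err := json.Marshal(modified)
    if err != nil {
        panic(err)
    }

    // Same call patchNode makes to build the merge patch.
    patch, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Node{})
    if err != nil {
        panic(err)
    }
    // Expected output, roughly:
    // {"status":{"allocatable":{"dummy.com/dummy":"123"},"capacity":{"dummy.com/dummy":"123"}}}
    fmt.Println(string(patch))
}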
@@ -1161,41 +1322,49 @@
             ginkgo.By("verifying initial pod resize policy is as expected")
             verifyPodResizePolicy(newPod, tc.containers)
 
-            err := e2epod.WaitForPodCondition(ctx, f.ClientSet, newPod.Namespace, newPod.Name, "Ready", timeouts.PodStartShort, testutils.PodRunningReady)
-            framework.ExpectNoError(err, "pod %s/%s did not go running", newPod.Namespace, newPod.Name)
-            framework.Logf("pod %s/%s running", newPod.Namespace, newPod.Name)
-
             ginkgo.By("verifying initial pod status resources")
             verifyPodStatusResources(newPod, tc.containers)
 
-            ginkgo.By("patching pod for resize")
-            patchedPod, pErr = f.ClientSet.CoreV1().Pods(newPod.Namespace).Patch(ctx, newPod.Name,
-                types.StrategicMergePatchType, []byte(tc.patchString), metav1.PatchOptions{})
-            framework.ExpectNoError(pErr, "failed to patch pod for resize")
+            ginkgo.By("verifying initial cgroup config are as expected")
+            framework.ExpectNoError(verifyPodContainersCgroupValues(ctx, f, newPod, tc.containers))
 
-            ginkgo.By("verifying pod patched for resize")
-            verifyPodResources(patchedPod, tc.expected)
+            patchAndVerify := func(patchString string, expectedContainers []TestContainerInfo, initialContainers []TestContainerInfo, opStr string, isRollback bool) {
+                ginkgo.By(fmt.Sprintf("patching pod for %s", opStr))
+                patchedPod, pErr = f.ClientSet.CoreV1().Pods(newPod.Namespace).Patch(context.TODO(), newPod.Name,
+                    types.StrategicMergePatchType, []byte(patchString), metav1.PatchOptions{})
+                framework.ExpectNoError(pErr, fmt.Sprintf("failed to patch pod for %s", opStr))
+
+                ginkgo.By(fmt.Sprintf("verifying pod patched for %s", opStr))
+                verifyPodResources(patchedPod, expectedContainers)
                 gomega.Eventually(ctx, verifyPodAllocations, timeouts.PodStartShort, timeouts.Poll).
-                    WithArguments(patchedPod, tc.containers).
+                    WithArguments(patchedPod, initialContainers).
                     Should(gomega.BeNil(), "failed to verify Pod allocations for patchedPod")
 
-            ginkgo.By("waiting for resize to be actuated")
-            resizedPod := waitForPodResizeActuation(ctx, f, f.ClientSet, podClient, newPod, patchedPod, tc.expected)
+                ginkgo.By(fmt.Sprintf("waiting for %s to be actuated", opStr))
+                resizedPod := waitForPodResizeActuation(ctx, f, podClient, newPod, patchedPod, expectedContainers, initialContainers, isRollback)
 
-            ginkgo.By("verifying pod resources after resize")
-            verifyPodResources(resizedPod, tc.expected)
+                // Check cgroup values only for containerd versions before 1.6.9
+                ginkgo.By(fmt.Sprintf("verifying pod container's cgroup values after %s", opStr))
+                framework.ExpectNoError(verifyPodContainersCgroupValues(ctx, f, resizedPod, expectedContainers))
 
-            ginkgo.By("verifying pod allocations after resize")
+                ginkgo.By(fmt.Sprintf("verifying pod resources after %s", opStr))
+                verifyPodResources(resizedPod, expectedContainers)
+
+                ginkgo.By(fmt.Sprintf("verifying pod allocations after %s", opStr))
                 gomega.Eventually(ctx, verifyPodAllocations, timeouts.PodStartShort, timeouts.Poll).
-                    WithArguments(resizedPod, tc.expected).
+                    WithArguments(resizedPod, expectedContainers).
                     Should(gomega.BeNil(), "failed to verify Pod allocations for resizedPod")
+            }
+
+            patchAndVerify(tc.patchString, tc.expected, tc.containers, "resize", false)
+
+            rbPatchStr, err := genPatchString(tc.containers)
+            framework.ExpectNoError(err)
+            // Resize has been actuated, test rollback
+            patchAndVerify(rbPatchStr, tc.containers, tc.expected, "rollback", true)
 
             ginkgo.By("deleting pod")
-            deletePodSyncByName(ctx, f, newPod.Name)
-            // we need to wait for all containers to really be gone so cpumanager reconcile loop will not rewrite the cpu_manager_state.
-            // this is in turn needed because we will have an unavoidable (in the current framework) race with the
-            // reconcile loop which will make our attempt to delete the state file and to restore the old config go haywire
-            waitForAllContainerRemoval(ctx, newPod.Name, newPod.Namespace)
+            podClient.DeleteSync(ctx, newPod.Name, metav1.DeleteOptions{}, timeouts.PodDelete)
         })
     }
 }
@@ -1286,11 +1455,8 @@ func doPodResizeErrorTests() {
                 WithArguments(patchedPod, tc.expected).
                 Should(gomega.BeNil(), "failed to verify Pod allocations for patchedPod")
 
-            deletePodSyncByName(ctx, f, newPod.Name)
-            // we need to wait for all containers to really be gone so cpumanager reconcile loop will not rewrite the cpu_manager_state.
-            // this is in turn needed because we will have an unavoidable (in the current framework) race with the
-            // reconcile loop which will make our attempt to delete the state file and to restore the old config go haywire
-            waitForAllContainerRemoval(ctx, newPod.Name, newPod.Namespace)
+            ginkgo.By("deleting pod")
+            podClient.DeleteSync(ctx, newPod.Name, metav1.DeleteOptions{}, timeouts.PodDelete)
         })
     }
 }
@@ -1301,7 +1467,7 @@
 // b) api-server in services doesn't start with --enable-admission-plugins=ResourceQuota
 // and is not possible to start it from TEST_ARGS
 // Above tests are performed by doSheduletTests() and doPodResizeResourceQuotaTests()
-// in test/node/pod_resize_test.go
+// in test/e2e/node/pod_resize.go
 
 var _ = SIGDescribe("Pod InPlace Resize Container", framework.WithSerial(), feature.InPlacePodVerticalScaling, "[NodeAlphaFeature:InPlacePodVerticalScaling]", func() {
     if !podOnCgroupv2Node {