Refactor: helper functions and quantity improvement

- Add getSleepingPod() helper function
- Refactor quantity functions to return *resource.Quantity instead of int64
- Improve helper functions for memory capacity

Signed-off-by: Itamar Holder <iholder@redhat.com>
Itamar Holder 2024-02-28 10:32:00 +02:00
parent 13403e836a
commit 2230ed7dc6
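
The quantity refactor below moves the capacity helpers from raw int64 byte counts to *resource.Quantity, so capacities can be cloned and subtracted without manual unit conversions. A minimal standalone sketch of that pattern, assuming illustrative capacity values (8Gi capacity, 7500Mi allocatable) rather than readings from a real node:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// cloneQuantity returns a pointer to a deep copy so that later Sub/Add calls
// do not mutate the caller's quantity.
func cloneQuantity(q resource.Quantity) *resource.Quantity {
	clone := q.DeepCopy()
	return &clone
}

func main() {
	// Hypothetical node values; the test reads these from node.Status instead.
	capacity := resource.MustParse("8Gi")
	allocatable := resource.MustParse("7500Mi")

	usedMemory := cloneQuantity(capacity)
	usedMemory.Sub(allocatable) // capacity - allocatable = memory held back by the system

	fmt.Printf("used memory: %s (%d bytes)\n", usedMemory.String(), usedMemory.Value())
}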


@@ -81,7 +81,7 @@ var _ = SIGDescribe("Swap", "[LinuxOnly]", nodefeature.Swap, func() {
 	})
 })
 
-// Note that memoryRequestEqualLimit is effective only when qosClass is PodQOSBestEffort.
+// Note that memoryRequestEqualLimit is effective only when qosClass is not PodQOSBestEffort.
 func getSwapTestPod(f *framework.Framework, qosClass v1.PodQOSClass, memoryRequestEqualLimit bool) *v1.Pod {
 	podMemoryAmount := resource.MustParse("128Mi")
 
@@ -109,10 +109,16 @@ func getSwapTestPod(f *framework.Framework, qosClass v1.PodQOSClass, memoryReque
 		resources.Requests = resources.Limits
 	}
 
-	pod := &v1.Pod{
+	pod := getSleepingPod(f.Namespace.Name)
+
+	return pod
+}
+
+func getSleepingPod(namespace string) *v1.Pod {
+	return &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
-			Name: "test-pod-swap-" + rand.String(5),
-			Namespace: f.Namespace.Name,
+			Name: "sleeping-test-pod-swap-" + rand.String(5),
+			Namespace: namespace,
 		},
 		Spec: v1.PodSpec{
 			RestartPolicy: v1.RestartPolicyAlways,
@@ -121,13 +127,10 @@ func getSwapTestPod(f *framework.Framework, qosClass v1.PodQOSClass, memoryReque
 					Name: "busybox-container",
 					Image: busyboxImage,
 					Command: []string{"sleep", "600"},
-					Resources: resources,
 				},
 			},
 		},
 	}
-
-	return pod
 }
 
 func runPodAndWaitUntilScheduled(f *framework.Framework, pod *v1.Pod) *v1.Pod {
@@ -191,44 +194,41 @@ func expectLimitedSwap(f *framework.Framework, pod *v1.Pod, expectedSwapLimit in
 	)
 }
 
-func getSwapCapacity(f *framework.Framework, pod *v1.Pod) int64 {
+func getSwapCapacity(f *framework.Framework, pod *v1.Pod) *resource.Quantity {
 	output := e2epod.ExecCommandInContainer(f, pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-ec", "free -b | grep Swap | xargs | cut -d\" \" -f2")
-	swapCapacity, err := strconv.Atoi(output)
+	swapCapacityBytes, err := strconv.Atoi(output)
 	framework.ExpectNoError(err, "cannot convert swap size to int")
 
-	ginkgo.By(fmt.Sprintf("providing swap capacity: %d", swapCapacity))
+	ginkgo.By(fmt.Sprintf("providing swap capacity: %d", swapCapacityBytes))
 
-	return int64(swapCapacity)
+	return resource.NewQuantity(int64(swapCapacityBytes), resource.BinarySI)
 }
 
-func getMemoryCapacity(f *framework.Framework, pod *v1.Pod) int64 {
-	nodes, err := f.ClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
-	framework.ExpectNoError(err, "failed listing nodes")
-
-	for _, node := range nodes.Items {
-		if node.Name != pod.Spec.NodeName {
-			continue
-		}
-
-		memCapacity := node.Status.Capacity[v1.ResourceMemory]
-		return memCapacity.Value()
-	}
-
-	framework.ExpectNoError(fmt.Errorf("node %s wasn't found", pod.Spec.NodeName))
-	return 0
+func getMemoryCapacity(f *framework.Framework, nodeName string) (memCapacity, usedMemory *resource.Quantity) {
+	node, err := f.ClientSet.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
+	framework.ExpectNoError(err, fmt.Sprintf("failed getting node %s", nodeName))
+
+	nodeOrigCapacity := node.Status.Capacity[v1.ResourceMemory]
+	memCapacity = cloneQuantity(nodeOrigCapacity)
+	usedMemory = cloneQuantity(nodeOrigCapacity)
+	usedMemory.Sub(node.Status.Allocatable[v1.ResourceMemory])
+	return
 }
 
 func calcSwapForBurstablePod(f *framework.Framework, pod *v1.Pod) int64 {
-	nodeMemoryCapacity := getMemoryCapacity(f, pod)
-	nodeSwapCapacity := getSwapCapacity(f, pod)
+	gomega.Expect(pod.Spec.NodeName).ToNot(gomega.BeEmpty(), "pod node name is empty")
+
+	nodeMemoryCapacityQuantity, _ := getMemoryCapacity(f, pod.Spec.NodeName)
+	nodeMemoryCapacity := nodeMemoryCapacityQuantity.Value()
+	nodeSwapCapacity := getSwapCapacity(f, pod).Value()
 
 	containerMemoryRequest := pod.Spec.Containers[0].Resources.Requests.Memory().Value()
 
 	containerMemoryProportion := float64(containerMemoryRequest) / float64(nodeMemoryCapacity)
 	swapAllocation := containerMemoryProportion * float64(nodeSwapCapacity)
 
 	ginkgo.By(fmt.Sprintf("Calculating swap for burstable pods: nodeMemoryCapacity: %d, nodeSwapCapacity: %d, containerMemoryRequest: %d, swapAllocation: %d",
 		nodeMemoryCapacity, nodeSwapCapacity, containerMemoryRequest, int64(swapAllocation)))
 
 	return int64(swapAllocation)
 }
@@ -245,3 +245,8 @@ func isNoSwap(f *framework.Framework, pod *v1.Pod) bool {
 
 	return kubeletCfg.MemorySwap.SwapBehavior == types.NoSwap || kubeletCfg.MemorySwap.SwapBehavior == ""
 }
+
+func cloneQuantity(resource resource.Quantity) *resource.Quantity {
+	clone := resource.DeepCopy()
+	return &clone
+}
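
For reference, the burstable-pod swap share computed in calcSwapForBurstablePod is proportional: swapAllocation = (containerMemoryRequest / nodeMemoryCapacity) * nodeSwapCapacity. A standalone sketch of that arithmetic with made-up capacities, not values taken from any real test node:

package main

import "fmt"

func main() {
	// Illustrative byte values, not real node readings.
	nodeMemoryCapacity := int64(8 * 1024 * 1024 * 1024) // 8Gi of node memory
	nodeSwapCapacity := int64(2 * 1024 * 1024 * 1024)   // 2Gi of node swap
	containerMemoryRequest := int64(128 * 1024 * 1024)  // 128Mi memory request

	// The container's share of node memory determines its share of node swap.
	containerMemoryProportion := float64(containerMemoryRequest) / float64(nodeMemoryCapacity)
	swapAllocation := containerMemoryProportion * float64(nodeSwapCapacity)

	// 128Mi / 8Gi = 1/64, so the pod gets 1/64 of 2Gi = 32Mi of swap.
	fmt.Printf("swap allocation: %d bytes\n", int64(swapAllocation))
}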