excludesharedpool: e2e tests: code refactor to handle non-integral CPUs

This patch renames cpuCount to cpuRequest, expressed in millicores, to
cater for cases where Guaranteed QoS pods make non-integral CPU requests.

Signed-off-by: Swati Sehgal <swsehgal@redhat.com>
commit 42dd01aa3f
parent 5d9032007a
Author: Swati Sehgal <swsehgal@redhat.com>
Date:   2021-06-16 17:44:01 +01:00

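For context on the API used below: resource.NewMilliQuantity builds a
Quantity directly from millicores, which the old fmt.Sprintf("%d", cpuCount)
pattern could not express for fractional values. A minimal standalone sketch
(illustrative only, not part of this commit):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// 2000 millicores is an integral request: canonicalizes to "2".
	whole := resource.NewMilliQuantity(2000, resource.DecimalSI)
	fmt.Println(whole.String(), whole.MilliValue()) // 2 2000

	// 1500 millicores is non-integral: rendered as "1500m", which keeps
	// the container off the exclusive-CPU path of the static CPU manager.
	frac := resource.NewMilliQuantity(1500, resource.DecimalSI)
	fmt.Println(frac.String(), frac.MilliValue()) // 1500m 1500
}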
@@ -53,7 +53,7 @@ type podDesc struct {
 	cntName        string
 	resourceName   string
 	resourceAmount int
-	cpuCount       int
+	cpuRequest     int // cpuRequest is in millicores
 }
 
 func makePodResourcesTestPod(desc podDesc) *v1.Pod {
@@ -66,9 +66,10 @@ func makePodResourcesTestPod(desc podDesc) *v1.Pod {
 		},
 		Command: []string{"sh", "-c", "sleep 1d"},
 	}
-	if desc.cpuCount > 0 {
-		cnt.Resources.Requests[v1.ResourceCPU] = resource.MustParse(fmt.Sprintf("%d", desc.cpuCount))
-		cnt.Resources.Limits[v1.ResourceCPU] = resource.MustParse(fmt.Sprintf("%d", desc.cpuCount))
+	if desc.cpuRequest > 0 {
+		cpuRequestQty := resource.NewMilliQuantity(int64(desc.cpuRequest), resource.DecimalSI)
+		cnt.Resources.Requests[v1.ResourceCPU] = *cpuRequestQty
+		cnt.Resources.Limits[v1.ResourceCPU] = *cpuRequestQty
 		// we don't really care, we only need to be in guaranteed QoS
 		cnt.Resources.Requests[v1.ResourceMemory] = resource.MustParse("100Mi")
 		cnt.Resources.Limits[v1.ResourceMemory] = resource.MustParse("100Mi")
@@ -186,13 +187,11 @@ func matchPodDescWithResources(expected []podDesc, found podResMap) error {
 		if !ok {
 			return fmt.Errorf("no container resources for pod %q container %q", podReq.podName, podReq.cntName)
 		}
-		if podReq.cpuCount > 0 {
-			if len(cntInfo.CpuIds) != podReq.cpuCount {
-				return fmt.Errorf("pod %q container %q expected %d cpus got %v", podReq.podName, podReq.cntName, podReq.cpuCount, cntInfo.CpuIds)
+		if podReq.cpuRequest > 0 {
+			if isIntegral(podReq.cpuRequest) && len(cntInfo.CpuIds) != int(podReq.cpuRequest/1000) {
+				return fmt.Errorf("pod %q container %q expected %d cpus got %v", podReq.podName, podReq.cntName, podReq.cpuRequest, cntInfo.CpuIds)
 			}
 		}
 		if podReq.resourceName != "" && podReq.resourceAmount > 0 {
 			dev := findContainerDeviceByName(cntInfo.GetDevices(), podReq.resourceName)
 			if dev == nil {
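The relaxed check above asserts the exclusive-CPU count only for integral
requests; a fractional Guaranteed request runs on the shared pool, so no
fixed set of CPU IDs can be expected. A hedged sketch of that expectation
(helper name expectedExclusiveCPUs is hypothetical):

// Expected number of exclusive CPUs for a millicore request, and whether
// the count can be asserted at all.
func expectedExclusiveCPUs(cpuRequest int) (int, bool) {
	if cpuRequest > 0 && isIntegral(cpuRequest) {
		return cpuRequest / 1000, true // e.g. 2000m -> 2 exclusive CPUs
	}
	return 0, false // e.g. 1500m -> shared pool, nothing to assert
}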
@@ -288,19 +287,19 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
 				cntName:        "cnt-00",
 				resourceName:   sd.resourceName,
 				resourceAmount: 1,
-				cpuCount:       2,
+				cpuRequest:     2000,
 			},
 			{
-				podName:  "pod-02",
-				cntName:  "cnt-00",
-				cpuCount: 2,
+				podName:    "pod-02",
+				cntName:    "cnt-00",
+				cpuRequest: 2000,
 			},
 			{
 				podName:        "pod-03",
 				cntName:        "cnt-00",
 				resourceName:   sd.resourceName,
 				resourceAmount: 1,
-				cpuCount:       1,
+				cpuRequest:     1000,
 			},
 		}
 	} else {
@@ -310,19 +309,19 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
 				cntName: "cnt-00",
 			},
 			{
-				podName:  "pod-01",
-				cntName:  "cnt-00",
-				cpuCount: 2,
+				podName:    "pod-01",
+				cntName:    "cnt-00",
+				cpuRequest: 2000,
 			},
 			{
-				podName:  "pod-02",
-				cntName:  "cnt-00",
-				cpuCount: 2,
+				podName:    "pod-02",
+				cntName:    "cnt-00",
+				cpuRequest: 2000,
 			},
 			{
-				podName:  "pod-03",
-				cntName:  "cnt-00",
-				cpuCount: 1,
+				podName:    "pod-03",
+				cntName:    "cnt-00",
+				cpuRequest: 1000,
 			},
 		}
@@ -344,12 +343,12 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
 				cntName:        "cnt-00",
 				resourceName:   sd.resourceName,
 				resourceAmount: 1,
-				cpuCount:       2,
+				cpuRequest:     2000,
 			},
 			{
-				podName:  "pod-02",
-				cntName:  "cnt-00",
-				cpuCount: 2,
+				podName:    "pod-02",
+				cntName:    "cnt-00",
+				cpuRequest: 2000,
 			},
 		}
 	} else {
@@ -359,14 +358,14 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
 				cntName: "cnt-00",
 			},
 			{
-				podName:  "pod-01",
-				cntName:  "cnt-00",
-				cpuCount: 2,
+				podName:    "pod-01",
+				cntName:    "cnt-00",
+				cpuRequest: 2000,
 			},
 			{
-				podName:  "pod-02",
-				cntName:  "cnt-00",
-				cpuCount: 2,
+				podName:    "pod-02",
+				cntName:    "cnt-00",
+				cpuRequest: 2000,
 			},
 		}
 	}
@@ -380,13 +379,13 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
 			cntName:        "cnt-00",
 			resourceName:   sd.resourceName,
 			resourceAmount: 1,
-			cpuCount:       1,
+			cpuRequest:     1000,
 		}
 	} else {
 		extra = podDesc{
-			podName:  "pod-03",
-			cntName:  "cnt-00",
-			cpuCount: 1,
+			podName:    "pod-03",
+			cntName:    "cnt-00",
+			cpuRequest: 1000,
 		}
 	}
@@ -405,16 +404,16 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
 	if sd != nil {
 		expected = []podDesc{
 			{
-				podName:  "pod-00",
-				cntName:  "cnt-00",
-				cpuCount: 1,
+				podName:    "pod-00",
+				cntName:    "cnt-00",
+				cpuRequest: 1000,
 			},
 			{
 				podName:        "pod-01",
 				cntName:        "cnt-00",
 				resourceName:   sd.resourceName,
 				resourceAmount: 1,
-				cpuCount:       2,
+				cpuRequest:     2000,
 			},
 			{
 				podName: "pod-02",
@@ -425,29 +424,29 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
 				cntName:        "cnt-00",
 				resourceName:   sd.resourceName,
 				resourceAmount: 1,
-				cpuCount:       1,
+				cpuRequest:     1000,
 			},
 		}
 	} else {
 		expected = []podDesc{
 			{
-				podName:  "pod-00",
-				cntName:  "cnt-00",
-				cpuCount: 1,
+				podName:    "pod-00",
+				cntName:    "cnt-00",
+				cpuRequest: 1000,
 			},
 			{
-				podName:  "pod-01",
-				cntName:  "cnt-00",
-				cpuCount: 2,
+				podName:    "pod-01",
+				cntName:    "cnt-00",
+				cpuRequest: 2000,
 			},
 			{
 				podName: "pod-02",
 				cntName: "cnt-00",
 			},
 			{
-				podName:  "pod-03",
-				cntName:  "cnt-00",
-				cpuCount: 1,
+				podName:    "pod-03",
+				cntName:    "cnt-00",
+				cpuRequest: 1000,
 			},
 		}
 	}
@@ -720,7 +719,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 			cntName:        "cnt-01",
 			resourceName:   KubeVirtResourceName,
 			resourceAmount: 1,
-			cpuCount:       1,
+			cpuRequest:     1000,
 		}
 
 		tpd := newTestPodData()
@@ -907,3 +906,7 @@ func getKubeVirtDevicePluginPod() *v1.Pod {
 	return p
 }
+
+func isIntegral(cpuRequest int) bool {
+	return (cpuRequest % 1000) == 0
+}
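A brief usage note on the new helper (values illustrative): isIntegral
tests whether a millicore value is a whole number of CPUs, which is what
determines eligibility for exclusive CPU allocation under the CPU manager's
static policy.

isIntegral(1000) // true:  1 CPU
isIntegral(2000) // true:  2 CPUs
isIntegral(1500) // false: 1.5 CPUs, served from the shared pool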