e2e: node: podresources: add internal helpers

The intent is to make the code more readable, with no intended
change in behaviour. It should now be a bit more explicit
about why the code checks certain values.

Signed-off-by: Francesco Romani <fromani@redhat.com>
Francesco Romani 2021-11-03 12:53:59 +01:00
parent 927914dec7
commit a6e8f7530a
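
For reference, a minimal standalone sketch (not part of the commit) of how the new helpers behave. The trimmed-down podDesc, the container names and the example.com/dev resource name are made up for illustration; only the helper bodies mirror the diff below (CpuRequestQty is omitted to avoid the apimachinery dependency).

// Standalone sketch: illustrates the podDesc helpers added by this commit.
package main

import "fmt"

type podDesc struct {
	cntName        string
	resourceName   string
	resourceAmount int
	cpuRequest     int // in millicores, as in the test code
}

// CpuRequestExclusive mirrors the helper added in this commit: exclusive
// CPUs are only handed out for integral (multiple of 1000m) requests.
func (desc podDesc) CpuRequestExclusive() int {
	if (desc.cpuRequest % 1000) != 0 {
		return 0
	}
	return desc.cpuRequest / 1000
}

func (desc podDesc) RequiresCPU() bool {
	return desc.cpuRequest > 0
}

func (desc podDesc) RequiresDevices() bool {
	return desc.resourceName != "" && desc.resourceAmount > 0
}

func main() {
	for _, desc := range []podDesc{
		{cntName: "cnt-shared", cpuRequest: 500},      // fractional request: shared pool, no exclusive CPUs
		{cntName: "cnt-exclusive", cpuRequest: 2000},  // integral request: 2 exclusive CPUs
		{cntName: "cnt-device", cpuRequest: 1000, resourceName: "example.com/dev", resourceAmount: 1}, // hypothetical device resource
	} {
		fmt.Printf("%s: requiresCPU=%v exclusiveCPUs=%d requiresDevices=%v\n",
			desc.cntName, desc.RequiresCPU(), desc.CpuRequestExclusive(), desc.RequiresDevices())
	}
}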


@@ -54,6 +54,28 @@ type podDesc struct {
 	cpuRequest int // cpuRequest is in millicores
 }
+func (desc podDesc) CpuRequestQty() resource.Quantity {
+	qty := resource.NewMilliQuantity(int64(desc.cpuRequest), resource.DecimalSI)
+	return *qty
+}
+
+func (desc podDesc) CpuRequestExclusive() int {
+	if (desc.cpuRequest % 1000) != 0 {
+		// exclusive cpus are requested only if the quantity is integral;
+		// hence, explicitly rule out non-integral requests
+		return 0
+	}
+	return desc.cpuRequest / 1000
+}
+
+func (desc podDesc) RequiresCPU() bool {
+	return desc.cpuRequest > 0
+}
+
+func (desc podDesc) RequiresDevices() bool {
+	return desc.resourceName != "" && desc.resourceAmount > 0
+}
 func makePodResourcesTestPod(desc podDesc) *v1.Pod {
 	cnt := v1.Container{
 		Name: desc.cntName,
@@ -64,15 +86,15 @@ func makePodResourcesTestPod(desc podDesc) *v1.Pod {
 		},
 		Command: []string{"sh", "-c", "sleep 1d"},
 	}
-	if desc.cpuRequest > 0 {
-		cpuRequestQty := resource.NewMilliQuantity(int64(desc.cpuRequest), resource.DecimalSI)
-		cnt.Resources.Requests[v1.ResourceCPU] = *cpuRequestQty
-		cnt.Resources.Limits[v1.ResourceCPU] = *cpuRequestQty
+	if desc.RequiresCPU() {
+		cpuRequestQty := desc.CpuRequestQty()
+		cnt.Resources.Requests[v1.ResourceCPU] = cpuRequestQty
+		cnt.Resources.Limits[v1.ResourceCPU] = cpuRequestQty
 		// we don't really care, we only need to be in guaranteed QoS
 		cnt.Resources.Requests[v1.ResourceMemory] = resource.MustParse("100Mi")
 		cnt.Resources.Limits[v1.ResourceMemory] = resource.MustParse("100Mi")
 	}
-	if desc.resourceName != "" && desc.resourceAmount > 0 {
+	if desc.RequiresDevices() {
 		cnt.Resources.Requests[v1.ResourceName(desc.resourceName)] = resource.MustParse(fmt.Sprintf("%d", desc.resourceAmount))
 		cnt.Resources.Limits[v1.ResourceName(desc.resourceName)] = resource.MustParse(fmt.Sprintf("%d", desc.resourceAmount))
 	}
@@ -185,7 +207,7 @@ func matchPodDescWithResources(expected []podDesc, found podResMap) error {
 		if !ok {
 			return fmt.Errorf("no container resources for pod %q container %q", podReq.podName, podReq.cntName)
 		}
-		if podReq.cpuRequest > 0 {
+		if podReq.RequiresCPU() {
 			if isIntegral(podReq.cpuRequest) && len(cntInfo.CpuIds) != int(podReq.cpuRequest) {
 				return fmt.Errorf("pod %q container %q expected %d cpus got %v", podReq.podName, podReq.cntName, podReq.cpuRequest, cntInfo.CpuIds)
 			}
@@ -193,7 +215,7 @@ func matchPodDescWithResources(expected []podDesc, found podResMap) error {
 				return fmt.Errorf("pod %q container %q requested %d expected to be allocated CPUs from shared pool %v", podReq.podName, podReq.cntName, podReq.cpuRequest, cntInfo.CpuIds)
 			}
 		}
-		if podReq.resourceName != "" && podReq.resourceAmount > 0 {
+		if podReq.RequiresDevices() {
 			dev := findContainerDeviceByName(cntInfo.GetDevices(), podReq.resourceName)
 			if dev == nil {
 				return fmt.Errorf("pod %q container %q expected data for resource %q not found", podReq.podName, podReq.cntName, podReq.resourceName)