e2e: topomgr: address reviewer comments
Signed-off-by: Francesco Romani <fromani@redhat.com>
parent 833519f80b
commit a249b93687
@@ -182,29 +182,30 @@ type testEnvInfo struct {
 	sriovResourceName string
 }
 
-func containerWantsDevices(cnt *v1.Container, envInfo testEnvInfo) bool {
+func containerWantsDevices(cnt *v1.Container, envInfo *testEnvInfo) bool {
 	_, found := cnt.Resources.Requests[v1.ResourceName(envInfo.sriovResourceName)]
 	return found
 }
 
-func checkNUMAAlignment(f *framework.Framework, pod *v1.Pod, cnt *v1.Container, logs string, envInfo testEnvInfo) (numaPodResources, error) {
+func checkNUMAAlignment(f *framework.Framework, pod *v1.Pod, cnt *v1.Container, logs string, envInfo *testEnvInfo) (*numaPodResources, error) {
+	var err error
 	podEnv, err := makeEnvMap(logs)
 	if err != nil {
-		return numaPodResources{}, err
+		return nil, err
 	}
 
 	CPUToNUMANode, err := getCPUToNUMANodeMapFromEnv(f, pod, cnt, podEnv, envInfo.numaNodes)
 	if err != nil {
-		return numaPodResources{}, err
+		return nil, err
 	}
 
 	PCIDevsToNUMANode, err := getPCIDeviceToNumaNodeMapFromEnv(f, pod, cnt, podEnv)
 	if err != nil {
-		return numaPodResources{}, err
+		return nil, err
 	}
 
 	if containerWantsDevices(cnt, envInfo) && len(PCIDevsToNUMANode) == 0 {
-		return numaPodResources{}, fmt.Errorf("no PCI devices found in environ")
+		return nil, fmt.Errorf("no PCI devices found in environ")
 	}
 	numaRes := numaPodResources{
 		CPUToNUMANode: CPUToNUMANode,
@@ -212,9 +213,9 @@ func checkNUMAAlignment(f *framework.Framework, pod *v1.Pod, cnt *v1.Container,
 	}
 	aligned := numaRes.CheckAlignment()
 	if !aligned {
-		return numaRes, fmt.Errorf("NUMA resources not aligned")
+		err = fmt.Errorf("NUMA resources not aligned")
 	}
-	return numaRes, nil
+	return &numaRes, err
 }
 
 type pciDeviceInfo struct {
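The change to checkNUMAAlignment above does two things: the early-exit paths now return nil instead of an empty numaPodResources{}, and a detected misalignment no longer returns immediately; the populated result travels back together with the non-nil error, so the caller can still log what was actually allocated. A minimal runnable sketch of that pattern, with illustrative stand-in names (alignment, check) rather than the suite's own types:

package main

import (
	"errors"
	"fmt"
)

type alignment struct{ cpuToNode map[int]int }

// aligned reports whether every CPU landed on the same NUMA node.
func (a *alignment) aligned() bool {
	node := -1
	for _, n := range a.cpuToNode {
		if node == -1 {
			node = n
		} else if n != node {
			return false
		}
	}
	return true
}

func check(cpuToNode map[int]int) (*alignment, error) {
	var err error
	res := alignment{cpuToNode: cpuToNode}
	if !res.aligned() {
		// Record the failure instead of returning early: the populated
		// result is still useful to the caller for diagnostics.
		err = errors.New("resources not aligned")
	}
	return &res, err
}

func main() {
	res, err := check(map[int]int{0: 0, 1: 1})
	if err != nil && res != nil {
		fmt.Printf("check failed, partial data: %v\n", res.cpuToNode)
	}
}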
@@ -297,7 +297,7 @@ func findSRIOVResource(node *v1.Node) (string, int64) {
 	return "", 0
 }
 
-func validatePodAlignment(f *framework.Framework, pod *v1.Pod, envInfo testEnvInfo) {
+func validatePodAlignment(f *framework.Framework, pod *v1.Pod, envInfo *testEnvInfo) {
 	for _, cnt := range pod.Spec.Containers {
 		ginkgo.By(fmt.Sprintf("validating the container %s on Gu pod %s", cnt.Name, pod.Name))
 
@@ -306,7 +306,10 @@ func validatePodAlignment(f *framework.Framework, pod *v1.Pod, envInfo testEnvIn
 
 		framework.Logf("got pod logs: %v", logs)
 		numaRes, err := checkNUMAAlignment(f, pod, &cnt, logs, envInfo)
-		framework.ExpectNoError(err, "NUMA Alignment check failed for [%s] of pod [%s]: %s", cnt.Name, pod.Name, numaRes.String())
+		framework.ExpectNoError(err, "NUMA Alignment check failed for [%s] of pod [%s]", cnt.Name, pod.Name)
+		if numaRes != nil {
+			framework.Logf("NUMA resources for %s/%s: %s", pod.Name, cnt.Name, numaRes.String())
+		}
 	}
 }
 
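The nil guard added above is what makes the new pointer return safe: on the early-exit paths checkNUMAAlignment now yields (nil, err), and keeping numaRes.String() inside the failure message, as the old code did, could dereference a nil pointer (assuming, as the sketch below does, that String reads fields off its receiver). A small self-contained sketch of the hazard and the guard, using a stand-in type (numaReport) rather than the suite's:

package main

import "fmt"

type numaReport struct{ cpuToNode map[int]int }

// String reads a field off the receiver, so calling it through a nil
// pointer would panic; hence the explicit guard at the call site.
func (r *numaReport) String() string { return fmt.Sprintf("CPUs: %v", r.cpuToNode) }

func main() {
	var rep *numaReport // the early-return case: (nil, err)
	if rep != nil {
		fmt.Println(rep.String())
	} else {
		fmt.Println("no NUMA data to report")
	}
}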
@@ -553,7 +556,7 @@ func waitForAllContainerRemoval(podName, podNS string) {
 	}, 2*time.Minute, 1*time.Second).Should(gomega.BeTrue())
 }
 
-func runTopologyManagerPositiveTest(f *framework.Framework, numPods int, ctnAttrs []tmCtnAttribute, envInfo testEnvInfo) {
+func runTopologyManagerPositiveTest(f *framework.Framework, numPods int, ctnAttrs []tmCtnAttribute, envInfo *testEnvInfo) {
 	var pods []*v1.Pod
 
 	for podID := 0; podID < numPods; podID++ {
@@ -578,7 +581,7 @@ func runTopologyManagerPositiveTest(f *framework.Framework, numPods int, ctnAttr
 	}
 }
 
-func runTopologyManagerNegativeTest(f *framework.Framework, numPods int, ctnAttrs []tmCtnAttribute, envInfo testEnvInfo) {
+func runTopologyManagerNegativeTest(f *framework.Framework, numPods int, ctnAttrs []tmCtnAttribute, envInfo *testEnvInfo) {
 	podName := "gu-pod"
 	framework.Logf("creating pod %s attrs %v", podName, ctnAttrs)
 	pod := makeTopologyManagerTestPod(podName, numalignCmd, ctnAttrs)
@@ -636,7 +639,7 @@ type sriovData struct {
 	resourceAmount int64
 }
 
-func setupSRIOVConfigOrFail(f *framework.Framework, configMap *v1.ConfigMap) sriovData {
+func setupSRIOVConfigOrFail(f *framework.Framework, configMap *v1.ConfigMap) *sriovData {
 	var err error
 
 	ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", metav1.NamespaceSystem, configMap.Name))
@@ -670,7 +673,7 @@ func setupSRIOVConfigOrFail(f *framework.Framework, configMap *v1.ConfigMap) sri
 	}, 2*time.Minute, framework.Poll).Should(gomega.BeTrue())
 	framework.Logf("Successfully created device plugin pod, detected %d SRIOV device %q", sriovResourceAmount, sriovResourceName)
 
-	return sriovData{
+	return &sriovData{
 		configMap:      configMap,
 		serviceAccount: serviceAccount,
 		pod:            dpPod,
@@ -679,7 +682,7 @@ func setupSRIOVConfigOrFail(f *framework.Framework, configMap *v1.ConfigMap) sri
 	}
 }
 
-func teardownSRIOVConfigOrFail(f *framework.Framework, sd sriovData) {
+func teardownSRIOVConfigOrFail(f *framework.Framework, sd *sriovData) {
 	var err error
 	gp := int64(0)
 	deleteOptions := metav1.DeleteOptions{
@@ -707,7 +710,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, configMap
 	}
 
 	sd := setupSRIOVConfigOrFail(f, configMap)
-	envInfo := testEnvInfo{
+	envInfo := &testEnvInfo{
 		numaNodes:         numaNodes,
 		sriovResourceName: sd.resourceName,
 	}
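Taken together, the patch threads the suite's shared fixtures around by pointer: setupSRIOVConfigOrFail returns the *sriovData that teardownSRIOVConfigOrFail later consumes, and every test observes the same *testEnvInfo. A compact, runnable sketch of that shape, with stand-in types (fixture, env) and a made-up resource name in place of the e2e framework:

package main

import "fmt"

type fixture struct{ resourceName string }

func setup() *fixture {
	// In the suite this would create the device plugin pod and wait
	// for the SRIOV resource to appear on the node.
	return &fixture{resourceName: "example.com/sriovnic"}
}

func teardown(fx *fixture) {
	fmt.Printf("tearing down %s\n", fx.resourceName)
}

type env struct {
	numaNodes    int
	resourceName string
}

func runTest(e *env) {
	// Every test sees the same env instance through the pointer.
	fmt.Printf("testing %s across %d NUMA nodes\n", e.resourceName, e.numaNodes)
}

func main() {
	fx := setup()
	defer teardown(fx)

	e := &env{numaNodes: 2, resourceName: fx.resourceName}
	runTest(e)
}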