Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-09 03:57:41 +00:00
podresource: move the checkForTopology logic inline
As per the recommendation in https://github.com/kubernetes/kubernetes/pull/103289#pullrequestreview-766949859, we move the check inline.

Signed-off-by: Swati Sehgal <swsehgal@redhat.com>
This commit is contained in:
parent: bb81101570
commit: 9337902648
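For context, the refactor trades a pluggable callback for a hard-coded check: previously callers threaded a deviceCheckFunc through matchPodDescWithResources (nil in most tests, checkForTopology in the KubeVirt test); now the KubeVirt topology check always runs inside the matcher. Below is a minimal, runnable sketch of the two shapes using simplified stand-in types — the real test operates on podDesc, podResMap, and the kubeletpodresourcesv1 API, and the per-pod matching is elided here.

```go
package main

import "fmt"

// Stand-ins for the test's types (not the real podResMap/API types).
type device struct {
	ResourceName string
	Topology     *struct{} // nil means the device reports no NUMA topology
}

type container struct{ Devices []device }

// resource name -> containers exposing it (flattened from the real podResMap).
type resMap map[string][]container

const kubeVirtResourceName = "devices.kubevirt.io/kvm" // value as used in the test file

// Before: the matcher accepted an optional per-call check.
type deviceCheckFunc func(found resMap) error

func matchWithCallback(found resMap, deviceCheck deviceCheckFunc) error {
	// ...per-pod matching elided...
	if deviceCheck != nil {
		return deviceCheck(found)
	}
	return nil
}

// After: the KubeVirt topology check is inlined and always runs.
func matchInline(found resMap) error {
	// ...per-pod matching elided...
	for _, cnt := range found[kubeVirtResourceName] {
		for _, cd := range cnt.Devices {
			if cd.ResourceName == kubeVirtResourceName && cd.Topology != nil {
				return fmt.Errorf("nil topology is expected for %s", cd.ResourceName)
			}
		}
	}
	return nil
}

func main() {
	found := resMap{kubeVirtResourceName: {{Devices: []device{{ResourceName: kubeVirtResourceName}}}}}
	fmt.Println(matchInline(found)) // <nil>: the KubeVirt device reports no topology, as expected
}
```

The trade-off, visible in the hunks below: callers no longer pass nil (or checkForTopology) through every expectPodResources call, at the cost of the matcher always running a KubeVirt-specific check.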
@@ -174,27 +174,7 @@ func findContainerDeviceByName(devs []*kubeletpodresourcesv1.ContainerDevices, r
     return nil
 }
 
-type deviceCheckFunc func(expect podDesc, found podResMap) error
-
-func checkForTopology(expect podDesc, found podResMap) error {
-    if cnts, ok := found[KubeVirtResourceName]; ok {
-        for _, cnt := range cnts {
-            for _, cd := range cnt.GetDevices() {
-                if cd.ResourceName != KubeVirtResourceName {
-                    continue
-                }
-                if cd.Topology != nil {
-                    //we expect nil topology
-                    return fmt.Errorf("Nil topology is expected")
-                }
-            }
-
-        }
-    }
-    return nil
-}
-
-func matchPodDescWithResources(expected []podDesc, found podResMap, deviceCheck deviceCheckFunc) error {
+func matchPodDescWithResources(expected []podDesc, found podResMap) error {
     for _, podReq := range expected {
         framework.Logf("matching: %#v", podReq)
@@ -227,20 +207,28 @@ func matchPodDescWithResources(expected []podDesc, found podResMap, deviceCheck
                 return fmt.Errorf("pod %q container %q expected no resources, got %v", podReq.podName, podReq.cntName, devs)
             }
         }
-        if deviceCheck != nil {
-            err := deviceCheck(podReq, found)
-            if err != nil {
-                return err
+        if cnts, ok := found[KubeVirtResourceName]; ok {
+            for _, cnt := range cnts {
+                for _, cd := range cnt.GetDevices() {
+                    if cd.ResourceName != KubeVirtResourceName {
+                        continue
+                    }
+                    if cd.Topology != nil {
+                        //we expect nil topology
+                        return fmt.Errorf("Nil topology is expected")
+                    }
+                }
+
             }
         }
     }
     return nil
 }
 
-func expectPodResources(offset int, cli kubeletpodresourcesv1.PodResourcesListerClient, expected []podDesc, cf deviceCheckFunc) {
+func expectPodResources(offset int, cli kubeletpodresourcesv1.PodResourcesListerClient, expected []podDesc) {
     gomega.EventuallyWithOffset(1+offset, func() error {
         found := getPodResources(cli)
-        return matchPodDescWithResources(expected, found, cf)
+        return matchPodDescWithResources(expected, found)
     }, time.Minute, 10*time.Second).Should(gomega.BeNil())
 }
 
@@ -284,7 +272,7 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
         },
     }
     tpd.createPodsForTest(f, expected)
-    expectPodResources(1, cli, expected, nil)
+    expectPodResources(1, cli, expected)
     tpd.deletePodsForTest(f)
 
     tpd = newTestPodData()
@@ -340,7 +328,7 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
 
     }
     tpd.createPodsForTest(f, expected)
-    expectPodResources(1, cli, expected, nil)
+    expectPodResources(1, cli, expected)
     tpd.deletePodsForTest(f)
 
     tpd = newTestPodData()
@@ -384,7 +372,7 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
     }
 
     tpd.createPodsForTest(f, expected)
-    expectPodResources(1, cli, expected, nil)
+    expectPodResources(1, cli, expected)
 
     if sd != nil {
         extra = podDesc{
@@ -408,7 +396,7 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
     })
 
     expected = append(expected, extra)
-    expectPodResources(1, cli, expected, nil)
+    expectPodResources(1, cli, expected)
     tpd.deletePodsForTest(f)
 
     tpd = newTestPodData()
@@ -464,11 +452,11 @@ func podresourcesListTests(f *framework.Framework, cli kubeletpodresourcesv1.Pod
         }
     }
     tpd.createPodsForTest(f, expected)
-    expectPodResources(1, cli, expected, nil)
+    expectPodResources(1, cli, expected)
 
     tpd.deletePod(f, "pod-01")
     expectedPostDelete := filterOutDesc(expected, "pod-01")
-    expectPodResources(1, cli, expectedPostDelete, nil)
+    expectPodResources(1, cli, expectedPostDelete)
     tpd.deletePodsForTest(f)
 }
 
@@ -746,12 +734,12 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
         desc,
     })
 
-    expectPodResources(1, cli, []podDesc{desc}, checkForTopology)
+    expectPodResources(1, cli, []podDesc{desc})
 
     ginkgo.By("Restarting Kubelet")
     restartKubelet()
     framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)
-    expectPodResources(1, cli, []podDesc{desc}, checkForTopology)
+    expectPodResources(1, cli, []podDesc{desc})
     tpd.deletePodsForTest(f)
 })
 