mirror of https://github.com/k3s-io/kubernetes.git
Refactor CPUManager-e2e-tests so that they can be reused by topology-manager-e2e-testsuite.
Signed-off-by: Deepthi Dharwar <ddharwar@redhat.com>
parent 1dd25a9efc
commit 4abbce4549
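The change is a mechanical extraction: the body of the old monolithic runCPUManagerTests is split into per-scenario helpers (runGuPodTest, runNonGuPodTest, runMultipleGuNonGuPods, runMultipleCPUGuPod, runMultipleCPUContainersGuPod, runMultipleGuPods), and runCPUManagerTests is rebuilt as a thin driver that calls them. Below is a minimal sketch of the intended reuse, assuming a hypothetical runTopologyManagerPolicySuiteTests driver in the same e2e node test package; the name and wiring are illustrative only and not part of this commit:

package e2enode

import (
    "github.com/onsi/ginkgo"

    "k8s.io/kubernetes/test/e2e/framework"
)

// Hypothetical driver (name and wiring assumed, not in this commit):
// once the scenarios are free functions, a topology-manager suite in
// the same package can reuse them without copying the pod setup,
// cpuset assertions, or teardown.
func runTopologyManagerPolicySuiteTests(f *framework.Framework) {
    cpuCap, cpuAlloc, _ := getLocalNodeCPUDetails(f)

    ginkgo.By("running a non-Gu pod")
    runNonGuPodTest(f, cpuCap)

    ginkgo.By("running a Gu pod")
    runGuPodTest(f)

    ginkgo.By("running multiple Gu and non-Gu pods")
    runMultipleGuNonGuPods(f, cpuCap, cpuAlloc)

    ginkgo.By("running multiple Gu pods")
    runMultipleGuPods(f)
}

Because each helper takes only the framework handle plus the CPU counts it needs, such a suite can run the same scenarios after applying its own kubelet configuration instead of enableCPUManagerInKubelet.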
@@ -258,50 +258,14 @@ func enableCPUManagerInKubelet(f *framework.Framework, cleanStateFile bool) (oldCfg *kubeletconfig.KubeletConfiguration) {
     return oldCfg
 }
 
-func runCPUManagerTests(f *framework.Framework) {
-    var cpuCap, cpuAlloc int64
-    var oldCfg *kubeletconfig.KubeletConfiguration
-    var cpuListString, expAllowedCPUsListRegex string
-    var cpuList []int
-    var cpu1, cpu2 int
-    var cset cpuset.CPUSet
-    var err error
+func runGuPodTest(f *framework.Framework) {
     var ctnAttrs []ctnAttribute
-    var pod, pod1, pod2 *v1.Pod
+    var cpu1 int
+    var err error
+    var cpuList []int
+    var pod *v1.Pod
+    var expAllowedCPUsListRegex string
 
-    ginkgo.It("should assign CPUs as expected based on the Pod spec", func() {
-        cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(f)
-
-        // Skip CPU Manager tests altogether if the CPU capacity < 2.
-        if cpuCap < 2 {
-            e2eskipper.Skipf("Skipping CPU Manager tests since the CPU capacity < 2")
-        }
-
-        // Enable CPU Manager in the kubelet.
-        oldCfg = enableCPUManagerInKubelet(f, true)
-
-        ginkgo.By("running a non-Gu pod")
-        ctnAttrs = []ctnAttribute{
-            {
-                ctnName:    "non-gu-container",
-                cpuRequest: "100m",
-                cpuLimit:   "200m",
-            },
-        }
-        pod = makeCPUManagerPod("non-gu-pod", ctnAttrs)
-        pod = f.PodClient().CreateSync(pod)
-
-        ginkgo.By("checking if the expected cpuset was assigned")
-        expAllowedCPUsListRegex = fmt.Sprintf("^0-%d\n$", cpuCap-1)
-        err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
-        framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
-            pod.Spec.Containers[0].Name, pod.Name)
-
-        ginkgo.By("by deleting the pods and waiting for container removal")
-        deletePods(f, []string{pod.Name})
-        waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
-
-        ginkgo.By("running a Gu pod")
     ctnAttrs = []ctnAttribute{
         {
             ctnName:    "gu-container",
@@ -329,8 +293,45 @@ func runCPUManagerTests(f *framework.Framework) {
     ginkgo.By("by deleting the pods and waiting for container removal")
     deletePods(f, []string{pod.Name})
     waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
+}
 
+func runNonGuPodTest(f *framework.Framework, cpuCap int64) {
+    var ctnAttrs []ctnAttribute
+    var err error
+    var pod *v1.Pod
+    var expAllowedCPUsListRegex string
+
+    ctnAttrs = []ctnAttribute{
+        {
+            ctnName:    "non-gu-container",
+            cpuRequest: "100m",
+            cpuLimit:   "200m",
+        },
+    }
+    pod = makeCPUManagerPod("non-gu-pod", ctnAttrs)
+    pod = f.PodClient().CreateSync(pod)
+
+    ginkgo.By("checking if the expected cpuset was assigned")
+    expAllowedCPUsListRegex = fmt.Sprintf("^0-%d\n$", cpuCap-1)
+    err = f.PodClient().MatchContainerOutput(pod.Name, pod.Spec.Containers[0].Name, expAllowedCPUsListRegex)
+    framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
+        pod.Spec.Containers[0].Name, pod.Name)
+
+    ginkgo.By("by deleting the pods and waiting for container removal")
+    deletePods(f, []string{pod.Name})
+    waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
+}
+
+func runMultipleGuNonGuPods(f *framework.Framework, cpuCap int64, cpuAlloc int64) {
+
+    var cpuListString, expAllowedCPUsListRegex string
+    var cpuList []int
+    var cpu1 int
+    var cset cpuset.CPUSet
+    var err error
+    var ctnAttrs []ctnAttribute
+    var pod1, pod2 *v1.Pod
+
+    ginkgo.By("running multiple Gu and non-Gu pods")
     ctnAttrs = []ctnAttribute{
         {
             ctnName:    "gu-container",
@@ -374,18 +375,20 @@ func runCPUManagerTests(f *framework.Framework) {
     err = f.PodClient().MatchContainerOutput(pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex)
     framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
         pod2.Spec.Containers[0].Name, pod2.Name)
 
     ginkgo.By("by deleting the pods and waiting for container removal")
     deletePods(f, []string{pod1.Name, pod2.Name})
     waitForContainerRemoval(pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace)
     waitForContainerRemoval(pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace)
 
-    // Skip rest of the tests if CPU capacity < 3.
-    if cpuCap < 3 {
-        e2eskipper.Skipf("Skipping rest of the CPU Manager tests since CPU capacity < 3")
-    }
-
-    ginkgo.By("running a Gu pod requesting multiple CPUs")
+func runMultipleCPUGuPod(f *framework.Framework) {
+    var cpuListString, expAllowedCPUsListRegex string
+    var cpuList []int
+    var cset cpuset.CPUSet
+    var err error
+    var ctnAttrs []ctnAttribute
+    var pod *v1.Pod
+
     ctnAttrs = []ctnAttribute{
         {
             ctnName:    "gu-container",
@@ -401,7 +404,7 @@ func runCPUManagerTests(f *framework.Framework) {
     if isMultiNUMA() {
         cpuList = cpuset.MustParse(getCoreSiblingList(0)).ToSlice()
         if !isHTEnabled() {
-            cset = cpuset.MustParse(fmt.Sprintf("%d,%d", cpuList[1], cpuList[2]))
+            cset = cpuset.MustParse(fmt.Sprintf("%d-%d", cpuList[1], cpuList[2]))
         } else {
             cset = cpuset.MustParse(getCPUSiblingList(int64(cpuList[1])))
         }
@@ -422,8 +425,16 @@ func runCPUManagerTests(f *framework.Framework) {
     ginkgo.By("by deleting the pods and waiting for container removal")
     deletePods(f, []string{pod.Name})
     waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
+}
 
-    ginkgo.By("running a Gu pod with multiple containers requesting integer CPUs")
+func runMultipleCPUContainersGuPod(f *framework.Framework) {
+
+    var expAllowedCPUsListRegex string
+    var cpuList []int
+    var cpu1, cpu2 int
+    var err error
+    var ctnAttrs []ctnAttribute
+    var pod *v1.Pod
     ctnAttrs = []ctnAttribute{
         {
             ctnName:    "gu-container1",
@@ -467,8 +478,16 @@ func runCPUManagerTests(f *framework.Framework) {
     deletePods(f, []string{pod.Name})
     waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
     waitForContainerRemoval(pod.Spec.Containers[1].Name, pod.Name, pod.Namespace)
-
+}
+
+func runMultipleGuPods(f *framework.Framework) {
+    var expAllowedCPUsListRegex string
+    var cpuList []int
+    var cpu1, cpu2 int
+    var err error
+    var ctnAttrs []ctnAttribute
+    var pod1, pod2 *v1.Pod
+
     ginkgo.By("running multiple Gu pods")
     ctnAttrs = []ctnAttribute{
         {
             ctnName:    "gu-container1",
@@ -513,11 +532,55 @@ func runCPUManagerTests(f *framework.Framework) {
     err = f.PodClient().MatchContainerOutput(pod2.Name, pod2.Spec.Containers[0].Name, expAllowedCPUsListRegex)
     framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
         pod2.Spec.Containers[0].Name, pod2.Name)
 
     ginkgo.By("by deleting the pods and waiting for container removal")
     deletePods(f, []string{pod1.Name, pod2.Name})
     waitForContainerRemoval(pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace)
     waitForContainerRemoval(pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace)
+}
 
+func runCPUManagerTests(f *framework.Framework) {
+    var cpuCap, cpuAlloc int64
+    var oldCfg *kubeletconfig.KubeletConfiguration
+    var expAllowedCPUsListRegex string
+    var cpuList []int
+    var cpu1 int
+    var err error
+    var ctnAttrs []ctnAttribute
+    var pod *v1.Pod
+
+    ginkgo.It("should assign CPUs as expected based on the Pod spec", func() {
+        cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(f)
+
+        // Skip CPU Manager tests altogether if the CPU capacity < 2.
+        if cpuCap < 2 {
+            e2eskipper.Skipf("Skipping CPU Manager tests since the CPU capacity < 2")
+        }
+
+        // Enable CPU Manager in the kubelet.
+        oldCfg = enableCPUManagerInKubelet(f, true)
+
+        ginkgo.By("running a non-Gu pod")
+        runNonGuPodTest(f, cpuCap)
+
+        ginkgo.By("running a Gu pod")
+        runGuPodTest(f)
+
+        ginkgo.By("running multiple Gu and non-Gu pods")
+        runMultipleGuNonGuPods(f, cpuCap, cpuAlloc)
+
+        // Skip rest of the tests if CPU capacity < 3.
+        if cpuCap < 3 {
+            e2eskipper.Skipf("Skipping rest of the CPU Manager tests since CPU capacity < 3")
+        }
+
+        ginkgo.By("running a Gu pod requesting multiple CPUs")
+        runMultipleCPUGuPod(f)
+
+        ginkgo.By("running a Gu pod with multiple containers requesting integer CPUs")
+        runMultipleCPUContainersGuPod(f)
+
+        ginkgo.By("running multiple Gu pods")
+        runMultipleGuPods(f)
+
         ginkgo.By("test for automatically remove inactive pods from cpumanager state file.")
         // First running a Gu Pod,
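Besides moving code, the diff carries one substantive change inside runMultipleCPUGuPod: on multi-NUMA machines without hyperthreading, the expected cpuset is now written as a range ("%d-%d") instead of a comma-separated pair ("%d,%d"). Both are valid cpuset list syntax, and for two consecutive CPU IDs they parse to the same set. A standalone sketch of that equivalence, assuming the vendored k8s.io/kubernetes/pkg/kubelet/cm/cpuset package; the main wrapper is added here only for illustration:

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

func main() {
    // "1,2" enumerates exactly CPUs 1 and 2; "1-2" is the range form.
    // For consecutive IDs the two spellings parse to identical sets.
    pair := cpuset.MustParse("1,2")
    rng := cpuset.MustParse("1-2")
    fmt.Println(pair.Equals(rng)) // true
}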