From 3818f6478096b64a3363bb8004bf7e8d8def0b37 Mon Sep 17 00:00:00 2001
From: Manjunath A Kumatagi
Date: Sun, 10 Mar 2019 19:23:24 +0530
Subject: [PATCH] Refactor the node perf tests to run on non-amd64 cluster

---
 test/e2e_node/image_list.go            |   4 +-
 test/e2e_node/node_perf_test.go        | 111 +++++++++++++++----------
 test/e2e_node/perf/workloads/npb_ep.go |   2 +-
 test/e2e_node/perf/workloads/npb_is.go |   2 +-
 4 files changed, 70 insertions(+), 49 deletions(-)

diff --git a/test/e2e_node/image_list.go b/test/e2e_node/image_list.go
index a37c8bdf28d..1050eacd5d3 100644
--- a/test/e2e_node/image_list.go
+++ b/test/e2e_node/image_list.go
@@ -53,8 +53,8 @@ var NodeImageWhiteList = sets.NewString(
 	imageutils.GetE2EImage(imageutils.Nonewprivs),
 	imageutils.GetPauseImageName(),
 	framework.GetGPUDevicePluginImage(),
-	"gcr.io/kubernetes-e2e-test-images/node-perf/npb-is-amd64:1.0",
-	"gcr.io/kubernetes-e2e-test-images/node-perf/npb-ep-amd64:1.0",
+	"gcr.io/kubernetes-e2e-test-images/node-perf/npb-is:1.0",
+	"gcr.io/kubernetes-e2e-test-images/node-perf/npb-ep:1.0",
 	"gcr.io/kubernetes-e2e-test-images/node-perf/tf-wide-deep-amd64:1.0",
 )
diff --git a/test/e2e_node/node_perf_test.go b/test/e2e_node/node_perf_test.go
index f4b602a3325..07c70f9a196 100644
--- a/test/e2e_node/node_perf_test.go
+++ b/test/e2e_node/node_perf_test.go
@@ -56,53 +56,74 @@ func setKubeletConfig(f *framework.Framework, cfg *kubeletconfig.KubeletConfiguration
 // Slow by design.
 var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
 	f := framework.NewDefaultFramework("node-performance-testing")
+	var (
+		wl     workloads.NodePerfWorkload
+		oldCfg *kubeletconfig.KubeletConfiguration
+		newCfg *kubeletconfig.KubeletConfiguration
+		pod    *corev1.Pod
+	)
+	JustBeforeEach(func() {
+		err := wl.PreTestExec()
+		framework.ExpectNoError(err)
+		oldCfg, err = getCurrentKubeletConfig()
+		framework.ExpectNoError(err)
+		newCfg, err = wl.KubeletConfig(oldCfg)
+		framework.ExpectNoError(err)
+		setKubeletConfig(f, newCfg)
+	})
+
+	cleanup := func() {
+		gp := int64(0)
+		delOpts := metav1.DeleteOptions{
+			GracePeriodSeconds: &gp,
+		}
+		f.PodClient().DeleteSync(pod.Name, &delOpts, framework.DefaultPodDeletionTimeout)
+		By("running the post test exec from the workload")
+		err := wl.PostTestExec()
+		framework.ExpectNoError(err)
+		setKubeletConfig(f, oldCfg)
+	}
+
+	runWorkload := func() {
+		By("running the workload and waiting for success")
+		// Make the pod for the workload.
+		pod = makeNodePerfPod(wl)
+		// Create the pod.
+		pod = f.PodClient().CreateSync(pod)
+		// Wait for pod success.
+		f.PodClient().WaitForSuccess(pod.Name, wl.Timeout())
+		podLogs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
+		framework.ExpectNoError(err)
+		perf, err := wl.ExtractPerformanceFromLogs(podLogs)
+		framework.ExpectNoError(err)
+		framework.Logf("Time to complete workload %s: %v", wl.Name(), perf)
+	}
 	Context("Run node performance testing with pre-defined workloads", func() {
-		It("run each pre-defined workload", func() {
-			By("running the workloads")
-			for _, workload := range workloads.NodePerfWorkloads {
-				By("running the pre test exec from the workload")
-				err := workload.PreTestExec()
-				framework.ExpectNoError(err)
-
-				By("restarting kubelet with required configuration")
-				// Get the Kubelet config required for this workload.
-				oldCfg, err := getCurrentKubeletConfig()
-				framework.ExpectNoError(err)
-
-				newCfg, err := workload.KubeletConfig(oldCfg)
-				framework.ExpectNoError(err)
-				// Set the Kubelet config required for this workload.
-				setKubeletConfig(f, newCfg)
-
-				By("running the workload and waiting for success")
-				// Make the pod for the workload.
-				pod := makeNodePerfPod(workload)
-
-				// Create the pod.
-				pod = f.PodClient().CreateSync(pod)
-				// Wait for pod success.
-				f.PodClient().WaitForSuccess(pod.Name, workload.Timeout())
-				podLogs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
-				framework.ExpectNoError(err)
-				perf, err := workload.ExtractPerformanceFromLogs(podLogs)
-				framework.ExpectNoError(err)
-				framework.Logf("Time to complete workload %s: %v", workload.Name(), perf)
-
-				// Delete the pod.
-				gp := int64(0)
-				delOpts := metav1.DeleteOptions{
-					GracePeriodSeconds: &gp,
-				}
-				f.PodClient().DeleteSync(pod.Name, &delOpts, framework.DefaultPodDeletionTimeout)
-
-				By("running the post test exec from the workload")
-				err = workload.PostTestExec()
-				framework.ExpectNoError(err)
-
-				// Set the Kubelet config back to the old one.
-				setKubeletConfig(f, oldCfg)
-			}
+		BeforeEach(func() {
+			wl = workloads.NodePerfWorkloads[0]
+		})
+		It("NAS parallel benchmark (NPB) suite - Integer Sort (IS) workload", func() {
+			defer cleanup()
+			runWorkload()
+		})
+	})
+	Context("Run node performance testing with pre-defined workloads", func() {
+		BeforeEach(func() {
+			wl = workloads.NodePerfWorkloads[1]
+		})
+		It("NAS parallel benchmark (NPB) suite - Embarrassingly Parallel (EP) workload", func() {
+			defer cleanup()
+			runWorkload()
+		})
+	})
+	Context("Run node performance testing with pre-defined workloads", func() {
+		BeforeEach(func() {
+			wl = workloads.NodePerfWorkloads[2]
+		})
+		It("TensorFlow workload", func() {
+			defer cleanup()
+			runWorkload()
 		})
 	})
 })
diff --git a/test/e2e_node/perf/workloads/npb_ep.go b/test/e2e_node/perf/workloads/npb_ep.go
index 8478f52b451..973c40b12bd 100644
--- a/test/e2e_node/perf/workloads/npb_ep.go
+++ b/test/e2e_node/perf/workloads/npb_ep.go
@@ -43,7 +43,7 @@ func (w npbEPWorkload) PodSpec() corev1.PodSpec {
 	var containers []corev1.Container
 	ctn := corev1.Container{
 		Name:  fmt.Sprintf("%s-ctn", w.Name()),
-		Image: "gcr.io/kubernetes-e2e-test-images/node-perf/npb-ep-amd64:1.0",
+		Image: "gcr.io/kubernetes-e2e-test-images/node-perf/npb-ep:1.0",
 		Resources: corev1.ResourceRequirements{
 			Requests: corev1.ResourceList{
 				corev1.ResourceName(corev1.ResourceCPU):    resource.MustParse("15000m"),
diff --git a/test/e2e_node/perf/workloads/npb_is.go b/test/e2e_node/perf/workloads/npb_is.go
index a7ee986360b..75aebff5fc1 100644
--- a/test/e2e_node/perf/workloads/npb_is.go
+++ b/test/e2e_node/perf/workloads/npb_is.go
@@ -41,7 +41,7 @@ func (w npbISWorkload) PodSpec() corev1.PodSpec {
 	var containers []corev1.Container
 	ctn := corev1.Container{
 		Name:  fmt.Sprintf("%s-ctn", w.Name()),
-		Image: "gcr.io/kubernetes-e2e-test-images/node-perf/npb-is-amd64:1.0",
+		Image: "gcr.io/kubernetes-e2e-test-images/node-perf/npb-is:1.0",
 		Resources: corev1.ResourceRequirements{
 			Requests: corev1.ResourceList{
 				corev1.ResourceName(corev1.ResourceCPU):    resource.MustParse("16000m"),
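Note for reviewers: each It block above drives one entry of workloads.NodePerfWorkloads through the same hooks the old for-loop used (PreTestExec and KubeletConfig in JustBeforeEach, PodSpec/Timeout/ExtractPerformanceFromLogs in runWorkload, PostTestExec in cleanup). As a reading aid only, here is a minimal sketch of that workload contract, paraphrased from test/e2e_node/perf/workloads; the definitions there are authoritative, and the exact signatures and import paths may differ from this approximation.

package workloads

import (
	"time"

	corev1 "k8s.io/api/core/v1"
	// Import path assumed for this branch; the kubeletconfig types may live elsewhere.
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
)

// NodePerfWorkload is the contract each pre-defined node perf workload satisfies.
type NodePerfWorkload interface {
	// Name identifies the workload in pod names and log messages.
	Name() string
	// PodSpec describes the pod that runs the benchmark; makeNodePerfPod wraps it.
	PodSpec() corev1.PodSpec
	// Timeout bounds how long WaitForSuccess waits for the pod to finish.
	Timeout() time.Duration
	// KubeletConfig derives the Kubelet configuration this workload needs
	// from the currently running one (applied in JustBeforeEach).
	KubeletConfig(oldCfg *kubeletconfig.KubeletConfiguration) (*kubeletconfig.KubeletConfiguration, error)
	// PreTestExec runs on the node before the workload's pod is created.
	PreTestExec() error
	// PostTestExec runs after the workload's pod is deleted (called from cleanup).
	PostTestExec() error
	// ExtractPerformanceFromLogs parses the pod logs into a completion time.
	ExtractPerformanceFromLogs(logs string) (time.Duration, error)
}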