Refactor the node perf tests to run on non-amd64 cluster

commit 3818f64780 (parent c90bf8d8ea)
@@ -53,8 +53,8 @@ var NodeImageWhiteList = sets.NewString(
 	imageutils.GetE2EImage(imageutils.Nonewprivs),
 	imageutils.GetPauseImageName(),
 	framework.GetGPUDevicePluginImage(),
-	"gcr.io/kubernetes-e2e-test-images/node-perf/npb-is-amd64:1.0",
-	"gcr.io/kubernetes-e2e-test-images/node-perf/npb-ep-amd64:1.0",
+	"gcr.io/kubernetes-e2e-test-images/node-perf/npb-is:1.0",
+	"gcr.io/kubernetes-e2e-test-images/node-perf/npb-ep:1.0",
 	"gcr.io/kubernetes-e2e-test-images/node-perf/tf-wide-deep-amd64:1.0",
 )
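The whitelist change above drops the -amd64 suffix from the NPB image references, so the node's container runtime is expected to resolve a multi-arch (manifest-list) reference and pull the variant matching its own architecture; the TensorFlow image keeps its -amd64 suffix in this commit. The alternative, picking an arch-suffixed tag in test code, is sketched below with hypothetical names (not part of the commit); it keys off the test runner's runtime.GOARCH, which is not necessarily the architecture of the node under test, which is one reason to prefer arch-agnostic references.

// Hypothetical sketch, not from the commit: building an arch-suffixed
// image name in test code instead of relying on a manifest-list image.
package main

import (
	"fmt"
	"runtime"
)

// archSuffixedImage appends the architecture of the machine running this
// code to the image name, e.g. ...-amd64:1.0 on an amd64 host.
func archSuffixedImage(base, tag string) string {
	return fmt.Sprintf("%s-%s:%s", base, runtime.GOARCH, tag)
}

func main() {
	fmt.Println(archSuffixedImage("gcr.io/kubernetes-e2e-test-images/node-perf/npb-is", "1.0"))
}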
@@ -56,53 +56,74 @@ func setKubeletConfig(f *framework.Framework, cfg *kubeletconfig.KubeletConfiguration)
 // Slow by design.
 var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
 	f := framework.NewDefaultFramework("node-performance-testing")
+	var (
+		wl     workloads.NodePerfWorkload
+		oldCfg *kubeletconfig.KubeletConfiguration
+		newCfg *kubeletconfig.KubeletConfiguration
+		pod    *corev1.Pod
+	)
+	JustBeforeEach(func() {
+		err := wl.PreTestExec()
+		framework.ExpectNoError(err)
+		oldCfg, err = getCurrentKubeletConfig()
+		framework.ExpectNoError(err)
+		newCfg, err = wl.KubeletConfig(oldCfg)
+		framework.ExpectNoError(err)
+		setKubeletConfig(f, newCfg)
+	})
+
+	cleanup := func() {
+		gp := int64(0)
+		delOpts := metav1.DeleteOptions{
+			GracePeriodSeconds: &gp,
+		}
+		f.PodClient().DeleteSync(pod.Name, &delOpts, framework.DefaultPodDeletionTimeout)
+		By("running the post test exec from the workload")
+		err := wl.PostTestExec()
+		framework.ExpectNoError(err)
+		setKubeletConfig(f, oldCfg)
+	}
+
+	runWorkload := func() {
+		By("running the workload and waiting for success")
+		// Make the pod for the workload.
+		pod = makeNodePerfPod(wl)
+		// Create the pod.
+		pod = f.PodClient().CreateSync(pod)
+		// Wait for pod success.
+		f.PodClient().WaitForSuccess(pod.Name, wl.Timeout())
+		podLogs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
+		framework.ExpectNoError(err)
+		perf, err := wl.ExtractPerformanceFromLogs(podLogs)
+		framework.ExpectNoError(err)
+		framework.Logf("Time to complete workload %s: %v", wl.Name(), perf)
+	}
+
 	Context("Run node performance testing with pre-defined workloads", func() {
-		It("run each pre-defined workload", func() {
-			By("running the workloads")
-			for _, workload := range workloads.NodePerfWorkloads {
-				By("running the pre test exec from the workload")
-				err := workload.PreTestExec()
-				framework.ExpectNoError(err)
-
-				By("restarting kubelet with required configuration")
-				// Get the Kubelet config required for this workload.
-				oldCfg, err := getCurrentKubeletConfig()
-				framework.ExpectNoError(err)
-
-				newCfg, err := workload.KubeletConfig(oldCfg)
-				framework.ExpectNoError(err)
-				// Set the Kubelet config required for this workload.
-				setKubeletConfig(f, newCfg)
-
-				By("running the workload and waiting for success")
-				// Make the pod for the workload.
-				pod := makeNodePerfPod(workload)
-
-				// Create the pod.
-				pod = f.PodClient().CreateSync(pod)
-				// Wait for pod success.
-				f.PodClient().WaitForSuccess(pod.Name, workload.Timeout())
-				podLogs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
-				framework.ExpectNoError(err)
-				perf, err := workload.ExtractPerformanceFromLogs(podLogs)
-				framework.ExpectNoError(err)
-				framework.Logf("Time to complete workload %s: %v", workload.Name(), perf)
-
-				// Delete the pod.
-				gp := int64(0)
-				delOpts := metav1.DeleteOptions{
-					GracePeriodSeconds: &gp,
-				}
-				f.PodClient().DeleteSync(pod.Name, &delOpts, framework.DefaultPodDeletionTimeout)
-
-				By("running the post test exec from the workload")
-				err = workload.PostTestExec()
-				framework.ExpectNoError(err)
-
-				// Set the Kubelet config back to the old one.
-				setKubeletConfig(f, oldCfg)
-			}
-		})
+		BeforeEach(func() {
+			wl = workloads.NodePerfWorkloads[0]
+		})
+		It("NAS parallel benchmark (NPB) suite - Integer Sort (IS) workload", func() {
+			defer cleanup()
+			runWorkload()
+		})
+	})
+	Context("Run node performance testing with pre-defined workloads", func() {
+		BeforeEach(func() {
+			wl = workloads.NodePerfWorkloads[1]
+		})
+		It("NAS parallel benchmark (NPB) suite - Embarrassingly Parallel (EP) workload", func() {
+			defer cleanup()
+			runWorkload()
+		})
+	})
+	Context("Run node performance testing with pre-defined workloads", func() {
+		BeforeEach(func() {
+			wl = workloads.NodePerfWorkloads[2]
+		})
+		It("TensorFlow workload", func() {
+			defer cleanup()
+			runWorkload()
+		})
 	})
 })
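The hunk above replaces the single It that looped over all entries of workloads.NodePerfWorkloads with shared cleanup and runWorkload helpers, a JustBeforeEach that applies the per-workload kubelet configuration, and one Context/It pair per workload, so each workload becomes its own spec that can pass, fail, or be skipped independently. Below is a minimal, self-contained Ginkgo sketch of that lifecycle; the names and types are illustrative stand-ins, not the e2e framework code.

// sketch_test.go: illustrative only. The relevant ordering guarantee is that
// JustBeforeEach runs after every BeforeEach at any nesting level, so the
// shared setup always sees the workload chosen by the enclosing Context.
package sketch_test

import (
	"fmt"
	"testing"

	. "github.com/onsi/ginkgo"
)

type workload struct{ name string }

func (w workload) preTestExec() error  { fmt.Println("pre-test:", w.name); return nil }
func (w workload) postTestExec() error { fmt.Println("post-test:", w.name); return nil }

var nodePerfWorkloads = []workload{{"npb-is"}, {"npb-ep"}, {"tf-wide-deep"}}

var _ = Describe("per-workload test layout", func() {
	var wl workload

	// Shared setup: runs once the Context-level BeforeEach has picked wl.
	JustBeforeEach(func() {
		if err := wl.preTestExec(); err != nil {
			Fail(err.Error())
		}
	})

	// Shared teardown, deferred by each It so it runs even if the spec fails.
	cleanup := func() {
		if err := wl.postTestExec(); err != nil {
			Fail(err.Error())
		}
	}

	runWorkload := func() { fmt.Println("running:", wl.name) }

	Context("first pre-defined workload", func() {
		BeforeEach(func() { wl = nodePerfWorkloads[0] })
		It("runs the workload and cleans up", func() {
			defer cleanup()
			runWorkload()
		})
	})
})

// TestSketch hooks the Ginkgo suite into go test.
func TestSketch(t *testing.T) {
	RunSpecs(t, "per-workload layout sketch")
}

Because the old loop did everything inside one It, a failure in an early workload aborted the remaining ones; with per-workload Contexts each result is reported separately.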
@@ -43,7 +43,7 @@ func (w npbEPWorkload) PodSpec() corev1.PodSpec {
 	var containers []corev1.Container
 	ctn := corev1.Container{
 		Name:  fmt.Sprintf("%s-ctn", w.Name()),
-		Image: "gcr.io/kubernetes-e2e-test-images/node-perf/npb-ep-amd64:1.0",
+		Image: "gcr.io/kubernetes-e2e-test-images/node-perf/npb-ep:1.0",
 		Resources: corev1.ResourceRequirements{
 			Requests: corev1.ResourceList{
 				corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("15000m"),
@@ -41,7 +41,7 @@ func (w npbISWorkload) PodSpec() corev1.PodSpec {
 	var containers []corev1.Container
 	ctn := corev1.Container{
 		Name:  fmt.Sprintf("%s-ctn", w.Name()),
-		Image: "gcr.io/kubernetes-e2e-test-images/node-perf/npb-is-amd64:1.0",
+		Image: "gcr.io/kubernetes-e2e-test-images/node-perf/npb-is:1.0",
 		Resources: corev1.ResourceRequirements{
 			Requests: corev1.ResourceList{
 				corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("16000m"),
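For context on the unchanged request lines in these two hunks: the CPU requests use the millicpu notation of resource.Quantity, so "15000m" and "16000m" ask for 15 and 16 full CPUs respectively. A minimal sketch of how such a value parses, assuming k8s.io/apimachinery is on the module path:

// Illustrative only: shows what the "16000m" CPU request above parses to.
// 1000m (millicores) equals one full CPU.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	q := resource.MustParse("16000m")
	fmt.Println(q.MilliValue()) // 16000 millicores
	fmt.Println(q.Value())      // 16 whole CPUs
}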