Merge pull request #65437 from losipiuk/lo/gpu-tests-from-env

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Read the GPU type from the TESTED_GPU_TYPE environment variable

```release-note
NONE
```
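
For context, this change drops the hard-coded `supportedGpuTypes` list and instead parameterizes the GPU autoscaling specs with a single type read from the `TESTED_GPU_TYPE` environment variable, with each spec failing fast when the variable is unset. A minimal standalone sketch of that pattern is below; the `main` wrapper and the `validGpuTypes` sanity check are illustrative assumptions for the sake of a runnable example, not part of the actual e2e suite:

```go
// Illustrative sketch only: shows the env-driven pattern the diff below adopts.
// The real change lives in the cluster-autoscaling e2e specs; the main wrapper
// and validGpuTypes check here are assumptions, not code from the change.
package main

import (
	"fmt"
	"os"
)

func main() {
	// The GPU type under test now comes from the environment...
	gpuType := os.Getenv("TESTED_GPU_TYPE")
	if gpuType == "" {
		// ...and the run fails fast when it was not provided.
		fmt.Fprintln(os.Stderr, "TESTED_GPU_TYPE not defined")
		os.Exit(1)
	}

	// The types the old hard-coded list covered, kept here only as a sanity check.
	validGpuTypes := map[string]bool{
		"nvidia-tesla-k80":  true,
		"nvidia-tesla-v100": true,
		"nvidia-tesla-p100": true,
	}
	if !validGpuTypes[gpuType] {
		fmt.Fprintf(os.Stderr, "unexpected GPU type %q\n", gpuType)
		os.Exit(1)
	}

	fmt.Printf("GPU autoscaling specs would run against %q\n", gpuType)
}
```

With this in place, a CI job picks which accelerator to exercise by exporting `TESTED_GPU_TYPE` (for example `nvidia-tesla-k80`) before running the `[Feature:ClusterSizeAutoscalingGpu]` specs, rather than iterating over every type in a single run.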
Authored by Kubernetes Submit Queue on 2018-06-26 08:55:44 -07:00; committed by GitHub.
commit 0d9c432542

```diff
@@ -22,6 +22,7 @@ import (
 	"io/ioutil"
 	"math"
 	"net/http"
+	"os"
 	"os/exec"
 	"regexp"
 	"strconv"
@@ -207,12 +208,14 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]",
 		func() { simpleScaleUpTest(0) })
 
-	supportedGpuTypes := []string{"nvidia-tesla-k80", "nvidia-tesla-v100", "nvidia-tesla-p100"}
-	for _, gpuType := range supportedGpuTypes {
-		gpuType := gpuType // create new variable for each iteration step
+	gpuType := os.Getenv("TESTED_GPU_TYPE")
 
 	It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
 		framework.SkipUnlessProviderIs("gke")
+		if gpuType == "" {
+			framework.Failf("TEST_GPU_TYPE not defined")
+			return
+		}
 
 		const gpuPoolName = "gpu-pool"
 		addGpuNodePool(gpuPoolName, gpuType, 1, 0)
@@ -236,6 +239,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
 		framework.SkipUnlessProviderIs("gke")
+		if gpuType == "" {
+			framework.Failf("TEST_GPU_TYPE not defined")
+			return
+		}
 
 		const gpuPoolName = "gpu-pool"
 		addGpuNodePool(gpuPoolName, gpuType, 1, 1)
@@ -262,6 +269,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
 		framework.SkipUnlessProviderIs("gke")
+		if gpuType == "" {
+			framework.Failf("TEST_GPU_TYPE not defined")
+			return
+		}
 
 		const gpuPoolName = "gpu-pool"
 		addGpuNodePool(gpuPoolName, gpuType, 1, 0)
@@ -287,6 +298,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
 		framework.SkipUnlessProviderIs("gke")
+		if gpuType == "" {
+			framework.Failf("TEST_GPU_TYPE not defined")
+			return
+		}
 
 		const gpuPoolName = "gpu-pool"
 		addGpuNodePool(gpuPoolName, gpuType, 1, 1)
@@ -310,7 +325,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 			func(size int) bool { return size == nodeCount }, scaleDownTimeout))
 		Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
 	})
-	}
 
 	// TODO consider moving to [Feature:ClusterSizeAutoscalingGpu] as soon as NAP goes out of beta. Currently
 	// project needed to run the NAP tests require whitelisting for NAP alpha
```