Merge pull request #63805 from losipiuk/lo/gpu-e2e-followup

Automatic merge from submit-queue (batch tested with PRs 63492, 62379, 61984, 63805, 63807). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Add e2e test to verify that GPU pool is not scaled up if GPUs are not requested

**Release note**:
```release-note
NONE
```
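The new test drives scale-up with `ReserveMemory`, which schedules pods that reserve memory but request no GPUs, so the autoscaler should grow only the default pool and leave the GPU pool at zero nodes. Below is a minimal sketch of that kind of pod spec; the package layout, image, and sizes are illustrative and not taken from the test itself.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// memoryOnlyPod builds a pod that reserves memory but makes no GPU request,
// mirroring the workload the new test schedules: the autoscaler can satisfy
// it on regular nodes, so the GPU pool should stay at zero.
// All names, the image, and sizes below are illustrative, not copied from the test.
func memoryOnlyPod(name, memory string) *v1.Pod {
	return &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:  name,
				Image: "registry.k8s.io/pause:3.9",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						// Memory only; no "nvidia.com/gpu" entry, so no GPU
						// node is needed to schedule this pod.
						v1.ResourceMemory: resource.MustParse(memory),
					},
				},
			}},
		},
	}
}

func main() {
	pod := memoryOnlyPod("memory-reservation", "100Mi")
	fmt.Println(pod.Spec.Containers[0].Resources.Requests.Memory().String())
}
```

For contrast, the existing GPU scale-up tests in this file schedule pods that do request the `nvidia.com/gpu` resource, which is what forces nodes into the GPU pool.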
commit 40f8c91e70, authored by Kubernetes Submit Queue on 2018-05-14 17:11:23 -07:00, committed by GitHub

@@ -253,6 +253,32 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
		Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(2))
	})

	It("Should not scale GPU pool up if pod does not require GPUs [Feature:ClusterSizeAutoscalingGpu]", func() {
		framework.SkipUnlessProviderIs("gke")
		const gpuPoolName = "gpu-pool"
		addGpuNodePool(gpuPoolName, "nvidia-tesla-k80", 1, 0)
		defer deleteNodePool(gpuPoolName)
		installNvidiaDriversDaemonSet()

		By("Enable autoscaler")
		framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
		defer disableAutoscaler(gpuPoolName, 0, 1)
		Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))

		By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
		ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "memory-reservation")
		// Verify that cluster size is increased
		framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
			func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))

		// Expect gpu pool to stay intact
		Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
	})

	It("Should scale down GPU pool from 1 [Feature:ClusterSizeAutoscalingGpu]", func() {
		framework.SkipUnlessProviderIs("gke")