Merge pull request #58980 from Random-Liu/fix-qps-set

Automatic merge from submit-queue (batch tested with PRs 58899, 58980). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Fix setting QPS in the density test.

The current QPS-setting code doesn't work, and all of the density tests that raise the QPS are failing: https://k8s-testgrid.appspot.com/sig-node-kubelet#kubelet-benchmark-gce-e2e

We should use the existing helper function `tempSetCurrentKubeletConfig` to set the QPS instead.
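
For reference, the new wiring looks roughly like this (a condensed sketch of the change in the diff below; the helper registers Ginkgo `BeforeEach`/`AfterEach` hooks, so it has to be called while the spec tree is being built, i.e. in the `Context` body rather than inside the `It`):

```go
Context("", func() {
    // Apply the QPS override during setup; the helper restores the
    // original kubelet config after the spec finishes.
    tempSetCurrentKubeletConfig(f, func(cfg *kubeletconfig.KubeletConfiguration) {
        framework.Logf("Old QPS limit is: %d\n", cfg.KubeAPIQPS)
        cfg.KubeAPIQPS = int32(itArg.APIQPSLimit) // raise from the default of 5
    })
    It(desc, func() {
        // ...run the density benchmark against the reconfigured kubelet...
    })
})
```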

@kubernetes/sig-node-bugs @mtaufen 

**Release note**:

```release-note
none
```
commit f519cba47f
Kubernetes Submit Queue, 2018-01-29 16:45:34 -08:00, committed by GitHub


```diff
@@ -31,6 +31,7 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/watch"
     "k8s.io/client-go/tools/cache"
+    "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
     stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
     kubemetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -189,22 +190,28 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
     for _, testArg := range dTests {
         itArg := testArg
-        desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval (QPS %d) [Benchmark]", itArg.podsNr, itArg.interval, itArg.APIQPSLimit)
-        It(desc, func() {
-            itArg.createMethod = "batch"
-            testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
+        Context("", func() {
+            desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval (QPS %d) [Benchmark]", itArg.podsNr, itArg.interval, itArg.APIQPSLimit)
             // The latency caused by API QPS limit takes a large portion (up to ~33%) of e2e latency.
             // It makes the pod startup latency of Kubelet (creation throughput as well) under-estimated.
             // Here we set API QPS limit from default 5 to 60 in order to test real Kubelet performance.
             // Note that it will cause higher resource usage.
-            setKubeletAPIQPSLimit(f, int32(itArg.APIQPSLimit))
-            batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, true)
+            tempSetCurrentKubeletConfig(f, func(cfg *kubeletconfig.KubeletConfiguration) {
+                framework.Logf("Old QPS limit is: %d\n", cfg.KubeAPIQPS)
+                // Set new API QPS limit
+                cfg.KubeAPIQPS = int32(itArg.APIQPSLimit)
+            })
+            It(desc, func() {
+                itArg.createMethod = "batch"
+                testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
+                batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, true)
 
-            By("Verifying latency")
-            logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)
+                By("Verifying latency")
+                logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)
 
-            By("Verifying resource")
-            logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
+                By("Verifying resource")
+                logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
+            })
         })
     }
 })
@@ -569,34 +576,3 @@ func logAndVerifyLatency(batchLag time.Duration, e2eLags []framework.PodLatencyD
 func logPodCreateThroughput(batchLag time.Duration, e2eLags []framework.PodLatencyData, podsNr int, testInfo map[string]string) {
     logPerfData(getThroughputPerfData(batchLag, e2eLags, podsNr, testInfo), "throughput")
 }
-
-// setKubeletAPIQPSLimit sets Kubelet API QPS via ConfigMap. Kubelet will restart with the new QPS.
-func setKubeletAPIQPSLimit(f *framework.Framework, newAPIQPS int32) {
-    const restartGap = 40 * time.Second
-    resp := pollConfigz(2*time.Minute, 5*time.Second)
-    kubeCfg, err := decodeConfigz(resp)
-    framework.ExpectNoError(err)
-    framework.Logf("Old QPS limit is: %d\n", kubeCfg.KubeAPIQPS)
-    // Set new API QPS limit
-    kubeCfg.KubeAPIQPS = newAPIQPS
-    // TODO(coufon): createConfigMap should firstly check whether configmap already exists, if so, use updateConfigMap.
-    // Calling createConfigMap twice will result in error. It is fine for benchmark test because we only run one test on a new node.
-    _, err = createConfigMap(f, kubeCfg)
-    framework.ExpectNoError(err)
-    // Wait for Kubelet to restart
-    time.Sleep(restartGap)
-    // Check new QPS has been set
-    resp = pollConfigz(2*time.Minute, 5*time.Second)
-    kubeCfg, err = decodeConfigz(resp)
-    framework.ExpectNoError(err)
-    framework.Logf("New QPS limit is: %d\n", kubeCfg.KubeAPIQPS)
-    // TODO(coufon): check test result to see if we need to retry here
-    if kubeCfg.KubeAPIQPS != newAPIQPS {
-        framework.Failf("Fail to set new kubelet API QPS limit.")
-    }
-}
```
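
For readers who don't know the helper: `tempSetCurrentKubeletConfig` lives in the e2e_node test utilities and wraps a config mutation in `BeforeEach`/`AfterEach` hooks, applying it via dynamic kubelet configuration before the spec runs and restoring the original configuration afterwards. Below is a minimal sketch of that shape, not the actual implementation; `getCurrentKubeletConfig` and `setKubeletConfiguration` are assumed stand-ins for the framework's accessors that read and push the live kubelet config.

```go
import (
    . "github.com/onsi/ginkgo"
    "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
    "k8s.io/kubernetes/test/e2e/framework"
)

// Sketch only; the real helper does more, e.g. checking that dynamic
// kubelet configuration is enabled before attempting any change.
func tempSetCurrentKubeletConfig(f *framework.Framework, updateFn func(cfg *kubeletconfig.KubeletConfiguration)) {
    var oldCfg *kubeletconfig.KubeletConfiguration
    BeforeEach(func() {
        var err error
        oldCfg, err = getCurrentKubeletConfig() // assumed: fetch the running kubelet's config
        framework.ExpectNoError(err)
        newCfg := oldCfg.DeepCopy()
        updateFn(newCfg)
        // assumed: push the mutated config and wait for the kubelet to adopt it
        framework.ExpectNoError(setKubeletConfiguration(f, newCfg))
    })
    AfterEach(func() {
        if oldCfg != nil {
            // Restore the original config so later specs see an unmodified node.
            framework.ExpectNoError(setKubeletConfiguration(f, oldCfg))
        }
    })
}
```

Compared with the deleted `setKubeletAPIQPSLimit`, this reuses the framework's existing apply-and-verify logic and, just as importantly, puts the old QPS limit back after the benchmark instead of leaving the node reconfigured for subsequent tests.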