Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-25 20:53:33 +00:00)
get image and machine info from api server instead of passing from test
This commit is contained in:
parent ac8aae584d · commit a683eb0418
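
This commit replaces the testName string that the node e2e perf helpers used to thread through every call with a testInfo map[string]string of perf-data labels. A new getTestNodeInfo helper builds that map by asking the API server for the test node's OS image and CPU/memory capacity, so image and machine details no longer have to be passed in from the test (or encoded in the node name) to reach the benchmark dashboard. The hunks below cover the benchmark utilities, the density tests, the resource-usage tests, and the benchmark runner's instance-naming comment.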
@@ -19,12 +19,15 @@ limitations under the License.
 package e2e_node
 
 import (
+	"fmt"
 	"sort"
 	"time"
 
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/perftype"
+
+	. "github.com/onsi/gomega"
 )
 
 const (
@@ -43,12 +46,9 @@ type NodeTimeSeries struct {
 }
 
 // logDensityTimeSeries logs the time series data of operation and resource usage
-func logDensityTimeSeries(rc *ResourceCollector, create, watch map[string]unversioned.Time, testName string) {
+func logDensityTimeSeries(rc *ResourceCollector, create, watch map[string]unversioned.Time, testInfo map[string]string) {
 	timeSeries := &NodeTimeSeries{
-		Labels: map[string]string{
-			"node": framework.TestContext.NodeName,
-			"test": testName,
-		},
+		Labels:  testInfo,
 		Version: currentDataVersion,
 	}
 	// Attach operation time series.
@@ -80,7 +80,7 @@ func getCumulatedPodTimeSeries(timePerPod map[string]unversioned.Time) []int64 {
 }
 
 // getLatencyPerfData returns perf data of pod startup latency.
-func getLatencyPerfData(latency framework.LatencyMetric, testName string) *perftype.PerfData {
+func getLatencyPerfData(latency framework.LatencyMetric, testInfo map[string]string) *perftype.PerfData {
 	return &perftype.PerfData{
 		Version: currentDataVersion,
 		DataItems: []perftype.DataItem{
@@ -98,15 +98,12 @@ func getLatencyPerfData(latency framework.LatencyMetric, testName string) *perft
 				},
 			},
 		},
-		Labels: map[string]string{
-			"node": framework.TestContext.NodeName,
-			"test": testName,
-		},
+		Labels: testInfo,
 	}
 }
 
 // getThroughputPerfData returns perf data of pod creation startup throughput.
-func getThroughputPerfData(batchLag time.Duration, e2eLags []framework.PodLatencyData, podsNr int, testName string) *perftype.PerfData {
+func getThroughputPerfData(batchLag time.Duration, e2eLags []framework.PodLatencyData, podsNr int, testInfo map[string]string) *perftype.PerfData {
 	return &perftype.PerfData{
 		Version: currentDataVersion,
 		DataItems: []perftype.DataItem{
@@ -122,9 +119,40 @@ func getThroughputPerfData(batchLag time.Duration, e2eLags []framework.PodLatenc
 				},
 			},
 		},
-		Labels: map[string]string{
-			"node": framework.TestContext.NodeName,
-			"test": testName,
-		},
+		Labels: testInfo,
 	}
 }
+
+// getTestNodeInfo fetches the capacity of a node from API server and returns a map of labels.
+func getTestNodeInfo(f *framework.Framework, testName string) map[string]string {
+	nodeName := framework.TestContext.NodeName
+	node, err := f.Client.Nodes().Get(nodeName)
+	Expect(err).NotTo(HaveOccurred())
+
+	cpu, ok := node.Status.Capacity["cpu"]
+	if !ok {
+		framework.Failf("Fail to fetch CPU capacity value of test node.")
+	}
+
+	memory, ok := node.Status.Capacity["memory"]
+	if !ok {
+		framework.Failf("Fail to fetch Memory capacity value of test node.")
+	}
+
+	cpuValue, ok := cpu.AsInt64()
+	if !ok {
+		framework.Failf("Fail to fetch CPU capacity value as Int64.")
+	}
+
+	memoryValue, ok := memory.AsInt64()
+	if !ok {
+		framework.Failf("Fail to fetch Memory capacity value as Int64.")
+	}
+
+	return map[string]string{
+		"node":    nodeName,
+		"test":    testName,
+		"image":   node.Status.NodeInfo.OSImage,
+		"machine": fmt.Sprintf("cpu:%dcore,memory:%.1fGB", cpuValue, float32(memoryValue)/(1024*1024*1024)),
+	}
+}
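
For context on the capacity conversion in getTestNodeInfo above: node.Status.Capacity values are resource Quantity objects, and AsInt64 returns (value, ok) with ok == false when the value cannot be represented exactly as an int64. A minimal standalone sketch of the same conversion — the quantity values here are made up, and the import path is the modern k8s.io/apimachinery location rather than the k8s.io/kubernetes/pkg/api/resource path this 2016-era code used:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Hypothetical capacity values; getTestNodeInfo reads these from node.Status.Capacity.
	cpu := resource.MustParse("2")
	memory := resource.MustParse("7679Mi")

	cpuValue, ok := cpu.AsInt64() // cores
	if !ok {
		panic("CPU capacity not representable as int64")
	}
	memoryValue, ok := memory.AsInt64() // bytes
	if !ok {
		panic("memory capacity not representable as int64")
	}

	// Same "machine" label format the new helper emits, e.g. "cpu:2core,memory:7.5GB".
	fmt.Printf("cpu:%dcore,memory:%.1fGB\n", cpuValue, float32(memoryValue)/(1024*1024*1024))
}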
@@ -54,16 +54,12 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
 	)
 
 	var (
-		ns       string
-		nodeName string
-		rc       *ResourceCollector
+		rc *ResourceCollector
 	)
 
 	f := framework.NewDefaultFramework("density-test")
 
 	BeforeEach(func() {
-		ns = f.Namespace.Name
-		nodeName = framework.TestContext.NodeName
 		// Start a standalone cadvisor pod using 'createSync', the pod is running when it returns
 		f.PodClient().CreateSync(getCadvisorPod())
 		// Resource collector monitors fine-grain CPU/memory usage by a standalone Cadvisor with
@@ -102,14 +98,15 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
 			It(fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval",
 				itArg.podsNr, itArg.interval), func() {
 				itArg.createMethod = "batch"
-				testName := itArg.getTestName()
-				batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, false)
+				testInfo := getTestNodeInfo(f, itArg.getTestName())
+
+				batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, false)
 
 				By("Verifying latency")
-				logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testName, true)
+				logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, true)
 
 				By("Verifying resource")
-				logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testName, true)
+				logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, true)
 			})
 		}
 	})
@@ -159,14 +156,15 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
 			It(fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval [Benchmark]",
 				itArg.podsNr, itArg.interval), func() {
 				itArg.createMethod = "batch"
-				testName := itArg.getTestName()
-				batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, true)
+				testInfo := getTestNodeInfo(f, itArg.getTestName())
+
+				batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, true)
 
 				By("Verifying latency")
-				logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testName, false)
+				logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)
 
 				By("Verifying resource")
-				logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testName, false)
+				logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
 			})
 		}
 	})
@@ -195,19 +193,19 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
 			It(fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval (QPS %d) [Benchmark]",
 				itArg.podsNr, itArg.interval, itArg.APIQPSLimit), func() {
 				itArg.createMethod = "batch"
-				testName := itArg.getTestName()
+				testInfo := getTestNodeInfo(f, itArg.getTestName())
 				// The latency caused by API QPS limit takes a large portion (up to ~33%) of e2e latency.
 				// It makes the pod startup latency of Kubelet (creation throughput as well) under-estimated.
 				// Here we set API QPS limit from default 5 to 60 in order to test real Kubelet performance.
 				// Note that it will cause higher resource usage.
 				setKubeletAPIQPSLimit(f, int32(itArg.APIQPSLimit))
-				batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, true)
+				batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, true)
 
 				By("Verifying latency")
-				logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testName, false)
+				logAndVerifyLatency(batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)
 
 				By("Verifying resource")
-				logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testName, false)
+				logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
 			})
 		}
 	})
@@ -238,14 +236,14 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
 			It(fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods",
 				itArg.podsNr, itArg.bgPodsNr), func() {
 				itArg.createMethod = "sequence"
-				testName := itArg.getTestName()
-				batchlag, e2eLags := runDensitySeqTest(f, rc, itArg)
+				testInfo := getTestNodeInfo(f, itArg.getTestName())
+				batchlag, e2eLags := runDensitySeqTest(f, rc, itArg, testInfo)
 
 				By("Verifying latency")
-				logAndVerifyLatency(batchlag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testName, true)
+				logAndVerifyLatency(batchlag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, true)
 
 				By("Verifying resource")
-				logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testName, true)
+				logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, true)
 			})
 		}
 	})
@@ -271,14 +269,14 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
 			It(fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods [Benchmark]",
 				itArg.podsNr, itArg.bgPodsNr), func() {
 				itArg.createMethod = "sequence"
-				testName := itArg.getTestName()
-				batchlag, e2eLags := runDensitySeqTest(f, rc, itArg)
+				testInfo := getTestNodeInfo(f, itArg.getTestName())
+				batchlag, e2eLags := runDensitySeqTest(f, rc, itArg, testInfo)
 
 				By("Verifying latency")
-				logAndVerifyLatency(batchlag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testName, false)
+				logAndVerifyLatency(batchlag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)
 
 				By("Verifying resource")
-				logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testName, false)
+				logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
 			})
 		}
 	})
@@ -314,7 +312,7 @@ func (dt *densityTest) getTestName() string {
 }
 
 // runDensityBatchTest runs the density batch pod creation test
-func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg densityTest,
+func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg densityTest, testInfo map[string]string,
 	isLogTimeSeries bool) (time.Duration, []framework.PodLatencyData) {
 	const (
 		podType = "density_test_pod"
@@ -390,19 +388,18 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg
 	sort.Sort(framework.LatencySlice(e2eLags))
 	batchLag := lastRunning.Time.Sub(firstCreate.Time)
 
-	testName := testArg.getTestName()
 	// Log time series data.
 	if isLogTimeSeries {
-		logDensityTimeSeries(rc, createTimes, watchTimes, testName)
+		logDensityTimeSeries(rc, createTimes, watchTimes, testInfo)
 	}
 	// Log throughput data.
-	logPodCreateThroughput(batchLag, e2eLags, testArg.podsNr, testName)
+	logPodCreateThroughput(batchLag, e2eLags, testArg.podsNr, testInfo)
 
 	return batchLag, e2eLags
 }
 
 // runDensitySeqTest runs the density sequential pod creation test
-func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg densityTest) (time.Duration, []framework.PodLatencyData) {
+func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg densityTest, testInfo map[string]string) (time.Duration, []framework.PodLatencyData) {
 	const (
 		podType               = "density_test_pod"
 		sleepBeforeCreatePods = 30 * time.Second
@@ -426,7 +423,7 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
 	batchlag, e2eLags := createBatchPodSequential(f, testPods)
 
 	// Log throughput data.
-	logPodCreateThroughput(batchlag, e2eLags, testArg.podsNr, testArg.getTestName())
+	logPodCreateThroughput(batchlag, e2eLags, testArg.podsNr, testInfo)
 
 	return batchlag, e2eLags
 }
@@ -541,7 +538,7 @@ func createBatchPodSequential(f *framework.Framework, pods []*api.Pod) (time.Dur
 
 // logAndVerifyLatency verifies that whether pod creation latency satisfies the limit.
 func logAndVerifyLatency(batchLag time.Duration, e2eLags []framework.PodLatencyData, podStartupLimits framework.LatencyMetric,
-	podBatchStartupLimit time.Duration, testName string, isVerify bool) {
+	podBatchStartupLimit time.Duration, testInfo map[string]string, isVerify bool) {
 	framework.PrintLatencies(e2eLags, "worst client e2e total latencies")
 
 	// TODO(coufon): do not trust 'kubelet' metrics since they are not reset!
@@ -551,7 +548,7 @@ func logAndVerifyLatency(batchLag time.Duration, e2eLags []framework.PodLatencyD
 	podCreateLatency := framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLags)}
 
 	// log latency perf data
-	framework.PrintPerfData(getLatencyPerfData(podCreateLatency.Latency, testName))
+	framework.PrintPerfData(getLatencyPerfData(podCreateLatency.Latency, testInfo))
 
 	if isVerify {
 		// check whether e2e pod startup time is acceptable.
@@ -566,8 +563,8 @@ func logAndVerifyLatency(batchLag time.Duration, e2eLags []framework.PodLatencyD
 }
 
 // logThroughput calculates and logs pod creation throughput.
-func logPodCreateThroughput(batchLag time.Duration, e2eLags []framework.PodLatencyData, podsNr int, testName string) {
-	framework.PrintPerfData(getThroughputPerfData(batchLag, e2eLags, podsNr, testName))
+func logPodCreateThroughput(batchLag time.Duration, e2eLags []framework.PodLatencyData, podsNr int, testInfo map[string]string) {
+	framework.PrintPerfData(getThroughputPerfData(batchLag, e2eLags, podsNr, testInfo))
 }
 
 // increaseKubeletAPIQPSLimit sets Kubelet API QPS via ConfigMap. Kubelet will restart with the new QPS.
@@ -38,7 +38,6 @@ var _ = framework.KubeDescribe("Resource-usage [Serial] [Slow]", func() {
 	)
 
 	var (
-		ns string
 		rc *ResourceCollector
 		om *framework.RuntimeOperationMonitor
 	)
@@ -46,7 +45,6 @@ var _ = framework.KubeDescribe("Resource-usage [Serial] [Slow]", func() {
 	f := framework.NewDefaultFramework("resource-usage")
 
 	BeforeEach(func() {
-		ns = f.Namespace.Name
 		om = framework.NewRuntimeOperationMonitor(f.Client)
 		// The test collects resource usage from a standalone Cadvisor pod.
 		// The Cadvsior of Kubelet has a housekeeping interval of 10s, which is too long to
@@ -83,9 +81,12 @@ var _ = framework.KubeDescribe("Resource-usage [Serial] [Slow]", func() {
 			itArg := testArg
 
 			It(fmt.Sprintf("resource tracking for %d pods per node", itArg.podsNr), func() {
+				testInfo := getTestNodeInfo(f, itArg.getTestName())
+
 				runResourceUsageTest(f, rc, itArg)
 
 				// Log and verify resource usage
-				logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, itArg.getTestName(), true)
+				logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, true)
 			})
 		}
 	})
@@ -107,9 +108,12 @@ var _ = framework.KubeDescribe("Resource-usage [Serial] [Slow]", func() {
 			itArg := testArg
 
 			It(fmt.Sprintf("resource tracking for %d pods per node [Benchmark]", itArg.podsNr), func() {
+				testInfo := getTestNodeInfo(f, itArg.getTestName())
+
 				runResourceUsageTest(f, rc, itArg)
 
 				// Log and verify resource usage
-				logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, itArg.getTestName(), true)
+				logAndVerifyResource(f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
 			})
 		}
 	})
@@ -176,7 +180,7 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg
 
 // logAndVerifyResource prints the resource usage as perf data and verifies whether resource usage satisfies the limit.
 func logAndVerifyResource(f *framework.Framework, rc *ResourceCollector, cpuLimits framework.ContainersCPUSummary,
-	memLimits framework.ResourceUsagePerContainer, testName string, isVerify bool) {
+	memLimits framework.ResourceUsagePerContainer, testInfo map[string]string, isVerify bool) {
 	nodeName := framework.TestContext.NodeName
 
 	// Obtain memory PerfData
@@ -195,10 +199,8 @@ func logAndVerifyResource(f *framework.Framework, rc *ResourceCollector, cpuLimi
 	cpuSummaryPerNode[nodeName] = cpuSummary
 
 	// Print resource usage
-	framework.PrintPerfData(framework.ResourceUsageToPerfDataWithLabels(usagePerNode,
-		map[string]string{"test": testName, "node": nodeName}))
-	framework.PrintPerfData(framework.CPUUsageToPerfDataWithLabels(cpuSummaryPerNode,
-		map[string]string{"test": testName, "node": nodeName}))
+	framework.PrintPerfData(framework.ResourceUsageToPerfDataWithLabels(usagePerNode, testInfo))
+	framework.PrintPerfData(framework.CPUUsageToPerfDataWithLabels(cpuSummaryPerNode, testInfo))
 
 	// Verify resource usage
 	if isVerify {
@@ -598,8 +598,8 @@ func imageToInstanceName(imageConfig *internalGCEImage) string {
 	if imageConfig.machine == "" {
 		return *instanceNamePrefix + "-" + imageConfig.image
 	}
-	// For benchmark test, node name has the format 'machine-image-uuid'.
-	// Node name is added to test data item labels and used for benchmark dashboard.
+	// For benchmark test, node name has the format 'machine-image-uuid' to run
+	// different machine types with the same image in parallel
 	return imageConfig.machine + "-" + imageConfig.image + "-" + uuid.NewUUID().String()[:8]
 }
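
As the revised comment says, benchmark instances are named 'machine-image-uuid' so that several machine types can run the same image in parallel; with this commit the dashboard labels come from getTestNodeInfo rather than being parsed out of that name. A hypothetical rendering of the naming scheme — the machine type and image are invented, and github.com/google/uuid stands in for the runner's uuid package:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	machine, image := "n1-standard-2", "cos-stable" // assumed values
	// Truncate the UUID to 8 characters, mirroring uuid.NewUUID().String()[:8] above.
	fmt.Println(machine + "-" + image + "-" + uuid.New().String()[:8])
}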