Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 03:41:45 +00:00
Merge pull request #54992 from porridge/perf-timing
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

**What this PR does / why we need it**: First step towards allowing us to get a quick overview of test length via perf-dash.k8s.io.

**Release note**:
```release-note
NONE
```

@kubernetes/sig-scalability-feature-requests
This commit is contained in: commit fe599c7dcf
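For orientation before the diff, here is a minimal sketch of how the new `timer` package introduced by this PR is meant to be used. The phase numbers, phase names, and the `main` wrapper are illustrative only; the actual call sites are in the density and load tests further down.

```go
// Illustrative sketch only; names and phase numbers are not taken verbatim from the PR.
package main

import (
	"fmt"

	"k8s.io/kubernetes/test/e2e/framework/timer"
)

// runLoadPhase shows the defer-based variant: StartPhase runs immediately,
// End is deferred until the function returns.
func runLoadPhase(t *timer.TestPhaseTimer) {
	defer t.StartPhase(200, "load pods creation").End()
	// ... create the load here ...
}

func main() {
	// One timer per test; it is safe for concurrent use.
	testPhaseDurations := timer.NewTestPhaseTimer()

	// Sequence numbers are chosen manually, with gaps, so that inserting a new
	// phase later does not renumber existing phases in historical records.
	setupPhase := testPhaseDurations.StartPhase(100, "node preparation")
	// ... prepare nodes ...
	setupPhase.End()

	runLoadPhase(testPhaseDurations)

	// The timer can be appended to the framework's test summaries (the density
	// and load tests in this PR do exactly that), or printed directly.
	fmt.Println(testPhaseDurations.PrintHumanReadable())
	fmt.Println(testPhaseDurations.PrintJSON())
}
```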
@@ -763,6 +763,7 @@ test/e2e/chaosmonkey
 test/e2e/common
 test/e2e/framework
 test/e2e/framework/metrics
+test/e2e/framework/timer
 test/e2e/instrumentation
 test/e2e/instrumentation/logging
 test/e2e/instrumentation/monitoring
@@ -161,6 +161,7 @@ filegroup(
         ":package-srcs",
         "//test/e2e/framework/ginkgowrapper:all-srcs",
         "//test/e2e/framework/metrics:all-srcs",
+        "//test/e2e/framework/timer:all-srcs",
     ],
     tags = ["automanaged"],
 )
test/e2e/framework/timer/BUILD (new file)
@@ -0,0 +1,34 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["timer.go"],
+    importpath = "k8s.io/kubernetes/test/e2e/framework/timer",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//test/e2e/framework:go_default_library",
+        "//test/e2e/perftype:go_default_library",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = ["timer_test.go"],
+    importpath = "k8s.io/kubernetes/test/e2e/framework/timer",
+    library = ":go_default_library",
+    deps = ["//vendor/github.com/onsi/gomega:go_default_library"],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)
test/e2e/framework/timer/timer.go (new file)
@@ -0,0 +1,126 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package timer
+
+import (
+    "time"
+
+    "bytes"
+    "fmt"
+
+    "k8s.io/kubernetes/test/e2e/framework"
+    "k8s.io/kubernetes/test/e2e/perftype"
+    "sync"
+)
+
+var now = time.Now
+
+// Represents a phase of a test. Phases can overlap.
+type Phase struct {
+    sequenceNumber int
+    name           string
+    startTime      time.Time
+    endTime        time.Time
+}
+
+func (phase *Phase) ended() bool {
+    return !phase.endTime.IsZero()
+}
+
+// End marks the phase as ended, unless it had already been ended before.
+func (phase *Phase) End() {
+    if !phase.ended() {
+        phase.endTime = now()
+    }
+}
+
+func (phase *Phase) label() string {
+    return fmt.Sprintf("%03d-%s", phase.sequenceNumber, phase.name)
+}
+
+func (phase *Phase) duration() time.Duration {
+    endTime := phase.endTime
+    if !phase.ended() {
+        endTime = now()
+    }
+    return endTime.Sub(phase.startTime)
+}
+
+func (phase *Phase) humanReadable() string {
+    if phase.ended() {
+        return fmt.Sprintf("Phase %s: %v\n", phase.label(), phase.duration())
+    } else {
+        return fmt.Sprintf("Phase %s: %v so far\n", phase.label(), phase.duration())
+    }
+}
+
+// A TestPhaseTimer groups phases and provides a way to export their measurements as JSON or human-readable text.
+// It is safe to use concurrently.
+type TestPhaseTimer struct {
+    lock   sync.Mutex
+    phases []*Phase
+}
+
+// NewTestPhaseTimer creates a new TestPhaseTimer.
+func NewTestPhaseTimer() *TestPhaseTimer {
+    return &TestPhaseTimer{}
+}
+
+// StartPhase starts a new phase.
+// sequenceNumber is an integer prepended to phaseName in the output, such that lexicographic sorting
+// of phases in perfdash reconstructs the order of execution. Unfortunately it needs to be
+// provided manually, since a simple incrementing counter would have the effect that inserting
+// a new phase would renumber subsequent phases, breaking the continuity of historical records.
+func (timer *TestPhaseTimer) StartPhase(sequenceNumber int, phaseName string) *Phase {
+    timer.lock.Lock()
+    defer timer.lock.Unlock()
+    newPhase := &Phase{sequenceNumber: sequenceNumber, name: phaseName, startTime: now()}
+    timer.phases = append(timer.phases, newPhase)
+    return newPhase
+}
+
+func (timer *TestPhaseTimer) SummaryKind() string {
+    return "TestPhaseTimer"
+}
+
+func (timer *TestPhaseTimer) PrintHumanReadable() string {
+    buf := bytes.Buffer{}
+    timer.lock.Lock()
+    defer timer.lock.Unlock()
+    for _, phase := range timer.phases {
+        buf.WriteString(phase.humanReadable())
+    }
+    return buf.String()
+}
+
+func (timer *TestPhaseTimer) PrintJSON() string {
+    data := perftype.PerfData{
+        Version: "v1",
+        DataItems: []perftype.DataItem{{
+            Unit:   "s",
+            Labels: map[string]string{"test": "phases"},
+            Data:   make(map[string]float64)}}}
+    timer.lock.Lock()
+    defer timer.lock.Unlock()
+    for _, phase := range timer.phases {
+        data.DataItems[0].Data[phase.label()] = phase.duration().Seconds()
+        if !phase.ended() {
+            data.DataItems[0].Labels["ended"] = "false"
+        }
+    }
+    return framework.PrettyPrintJSON(data)
+}
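Because End is idempotent, the callers in this PR typically start a phase, defer its End for the error path, and also call End explicitly on the happy path once the timed work is done; the explicit call records the real end time and the later deferred call becomes a no-op. A minimal sketch of that pattern (the phase number and name are illustrative, not taken from the PR):

```go
// Sketch of the start/defer/explicit-End pattern used by the density and load tests below.
func doSomeWork(testPhaseDurations *timer.TestPhaseTimer) {
	phase := testPhaseDurations.StartPhase(300, "some work")
	defer phase.End() // covers early returns and panics

	// ... perform the work being timed ...

	phase.End() // on the normal path, stop the clock before any follow-up work
}
```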
test/e2e/framework/timer/timer_test.go (new file)
@@ -0,0 +1,92 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package timer
+
+import (
+    "testing"
+    "time"
+
+    . "github.com/onsi/gomega"
+)
+
+var currentTime time.Time
+
+func init() {
+    setCurrentTimeSinceEpoch(0)
+    now = func() time.Time { return currentTime }
+}
+
+func setCurrentTimeSinceEpoch(duration time.Duration) {
+    currentTime = time.Unix(0, duration.Nanoseconds())
+}
+
+func testUsageWithDefer(timer *TestPhaseTimer) {
+    defer timer.StartPhase(33, "two").End()
+    setCurrentTimeSinceEpoch(6*time.Second + 500*time.Millisecond)
+}
+
+func TestTimer(t *testing.T) {
+    RegisterTestingT(t)
+
+    timer := NewTestPhaseTimer()
+    setCurrentTimeSinceEpoch(1 * time.Second)
+    phaseOne := timer.StartPhase(1, "one")
+    setCurrentTimeSinceEpoch(3 * time.Second)
+    testUsageWithDefer(timer)
+
+    Expect(timer.PrintJSON()).To(MatchJSON(`{
+        "version": "v1",
+        "dataItems": [
+            {
+                "data": {
+                    "001-one": 5.5,
+                    "033-two": 3.5
+                },
+                "unit": "s",
+                "labels": {
+                    "test": "phases",
+                    "ended": "false"
+                }
+            }
+        ]
+    }`))
+    Expect(timer.PrintHumanReadable()).To(Equal(`Phase 001-one: 5.5s so far
+Phase 033-two: 3.5s
+`))
+
+    setCurrentTimeSinceEpoch(7*time.Second + 500*time.Millisecond)
+    phaseOne.End()
+
+    Expect(timer.PrintJSON()).To(MatchJSON(`{
+        "version": "v1",
+        "dataItems": [
+            {
+                "data": {
+                    "001-one": 6.5,
+                    "033-two": 3.5
+                },
+                "unit": "s",
+                "labels": {
+                    "test": "phases"
+                }
+            }
+        ]
+    }`))
+    Expect(timer.PrintHumanReadable()).To(Equal(`Phase 001-one: 6.5s
+Phase 033-two: 3.5s
+`))
+}
@@ -20,6 +20,7 @@ go_library(
         "//pkg/apis/extensions:go_default_library",
         "//pkg/client/clientset_generated/internalclientset:go_default_library",
         "//test/e2e/framework:go_default_library",
+        "//test/e2e/framework/timer:go_default_library",
         "//test/utils:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
         "//vendor/github.com/onsi/gomega:go_default_library",
@@ -44,6 +44,7 @@ import (
     "k8s.io/kubernetes/pkg/apis/extensions"
     "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/test/e2e/framework"
+    "k8s.io/kubernetes/test/e2e/framework/timer"
     testutils "k8s.io/kubernetes/test/utils"

     . "github.com/onsi/ginkgo"
@@ -76,6 +77,54 @@ type DensityTestConfig struct {
     DaemonConfigs []*testutils.DaemonConfig
 }

+func (dtc *DensityTestConfig) runSecretConfigs(testPhase *timer.Phase) {
+    defer testPhase.End()
+    for _, sc := range dtc.SecretConfigs {
+        sc.Run()
+    }
+}
+
+func (dtc *DensityTestConfig) runConfigMapConfigs(testPhase *timer.Phase) {
+    defer testPhase.End()
+    for _, cmc := range dtc.ConfigMapConfigs {
+        cmc.Run()
+    }
+}
+
+func (dtc *DensityTestConfig) runDaemonConfigs(testPhase *timer.Phase) {
+    defer testPhase.End()
+    for _, dc := range dtc.DaemonConfigs {
+        dc.Run()
+    }
+}
+
+func (dtc *DensityTestConfig) deleteSecrets(testPhase *timer.Phase) {
+    defer testPhase.End()
+    for i := range dtc.SecretConfigs {
+        dtc.SecretConfigs[i].Stop()
+    }
+}
+
+func (dtc *DensityTestConfig) deleteConfigMaps(testPhase *timer.Phase) {
+    defer testPhase.End()
+    for i := range dtc.ConfigMapConfigs {
+        dtc.ConfigMapConfigs[i].Stop()
+    }
+}
+
+func (dtc *DensityTestConfig) deleteDaemonSets(numberOfClients int, testPhase *timer.Phase) {
+    defer testPhase.End()
+    for i := range dtc.DaemonConfigs {
+        framework.ExpectNoError(framework.DeleteResourceAndPods(
+            dtc.ClientSets[i%numberOfClients],
+            dtc.InternalClientsets[i%numberOfClients],
+            extensions.Kind("DaemonSet"),
+            dtc.DaemonConfigs[i].Namespace,
+            dtc.DaemonConfigs[i].Name,
+        ))
+    }
+}
+
 func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
     var apiserverMem uint64
     var controllerMem uint64
@@ -196,20 +245,16 @@ func logPodStartupStatus(c clientset.Interface, expectedPods int, observedLabels

 // runDensityTest will perform a density test and return the time it took for
 // all pods to start
-func runDensityTest(dtc DensityTestConfig) time.Duration {
+func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer) time.Duration {
     defer GinkgoRecover()

     // Create all secrets, configmaps and daemons.
-    for i := range dtc.SecretConfigs {
-        dtc.SecretConfigs[i].Run()
-    }
-    for i := range dtc.ConfigMapConfigs {
-        dtc.ConfigMapConfigs[i].Run()
-    }
-    for i := range dtc.DaemonConfigs {
-        dtc.DaemonConfigs[i].Run()
-    }
+    dtc.runSecretConfigs(testPhaseDurations.StartPhase(250, "secrets creation"))
+    dtc.runConfigMapConfigs(testPhaseDurations.StartPhase(260, "configmaps creation"))
+    dtc.runDaemonConfigs(testPhaseDurations.StartPhase(270, "daemonsets creation"))

+    replicationCtrlStartupPhase := testPhaseDurations.StartPhase(300, "saturation pods creation")
+    defer replicationCtrlStartupPhase.End()
     // Start all replication controllers.
     startTime := time.Now()
     wg := sync.WaitGroup{}
@@ -231,7 +276,10 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
     close(logStopCh)
     framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
     framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))
+    replicationCtrlStartupPhase.End()

+    printPodAllocationPhase := testPhaseDurations.StartPhase(400, "printing pod allocation")
+    defer printPodAllocationPhase.End()
     // Print some data about Pod to Node allocation
     By("Printing Pod to Node allocation data")
     podList, err := dtc.ClientSets[0].CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
@@ -253,11 +301,14 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
     for _, node := range nodeNames {
         framework.Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node])
     }
+    defer printPodAllocationPhase.End()
     return startupTime
 }

-func cleanupDensityTest(dtc DensityTestConfig) {
+func cleanupDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer) {
     defer GinkgoRecover()
+    podCleanupPhase := testPhaseDurations.StartPhase(900, "latency pods deletion")
+    defer podCleanupPhase.End()
     By("Deleting created Collections")
     numberOfClients := len(dtc.ClientSets)
     // We explicitly delete all pods to have API calls necessary for deletion accounted in metrics.
@@ -275,23 +326,11 @@ func cleanupDensityTest(dtc DensityTestConfig) {
             framework.ExpectNoError(err)
         }
     }
+    podCleanupPhase.End()

-    // Delete all secrets, configmaps and daemons.
-    for i := range dtc.SecretConfigs {
-        dtc.SecretConfigs[i].Stop()
-    }
-    for i := range dtc.ConfigMapConfigs {
-        dtc.ConfigMapConfigs[i].Stop()
-    }
-    for i := range dtc.DaemonConfigs {
-        framework.ExpectNoError(framework.DeleteResourceAndPods(
-            dtc.ClientSets[i%numberOfClients],
-            dtc.InternalClientsets[i%numberOfClients],
-            extensions.Kind("DaemonSet"),
-            dtc.DaemonConfigs[i].Namespace,
-            dtc.DaemonConfigs[i].Name,
-        ))
-    }
+    dtc.deleteSecrets(testPhaseDurations.StartPhase(910, "secrets deletion"))
+    dtc.deleteConfigMaps(testPhaseDurations.StartPhase(920, "configmaps deletion"))
+    dtc.deleteDaemonSets(numberOfClients, testPhaseDurations.StartPhase(930, "daemonsets deletion"))
 }

 // This test suite can take a long time to run, and can affect or be affected by other tests.
@@ -316,6 +355,7 @@ var _ = SIGDescribe("Density", func() {

     testCaseBaseName := "density"
     missingMeasurements := 0
+    var testPhaseDurations *timer.TestPhaseTimer

     // Gathers data prior to framework namespace teardown
     AfterEach(func() {
@@ -349,6 +389,7 @@ var _ = SIGDescribe("Density", func() {
         if err == nil {
             summaries = append(summaries, latency)
         }
+        summaries = append(summaries, testPhaseDurations)

         framework.PrintSummaries(summaries, testCaseBaseName)

@@ -368,6 +409,7 @@ var _ = SIGDescribe("Density", func() {
     BeforeEach(func() {
         c = f.ClientSet
         ns = f.Namespace.Name
+        testPhaseDurations = timer.NewTestPhaseTimer()

         masters, nodes = framework.GetMasterAndWorkerNodesOrDie(c)
         nodeCount = len(nodes.Items)
@@ -454,6 +496,8 @@ var _ = SIGDescribe("Density", func() {
         )
         itArg := testArg
         It(name, func() {
+            nodePrepPhase := testPhaseDurations.StartPhase(100, "node preparation")
+            defer nodePrepPhase.End()
             nodePreparer := framework.NewE2ETestNodePreparer(
                 f.ClientSet,
                 []testutils.CountToStrategy{{Count: nodeCount, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
@@ -469,10 +513,11 @@ var _ = SIGDescribe("Density", func() {
             fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
             framework.ExpectNoError(err)
             defer fileHndl.Close()
+            nodePrepPhase.End()

             // nodeCountPerNamespace and CreateNamespaces are defined in load.go
             numberOfCollections := (nodeCount + nodeCountPerNamespace - 1) / nodeCountPerNamespace
-            namespaces, err := CreateNamespaces(f, numberOfCollections, fmt.Sprintf("density-%v", testArg.podsPerNode))
+            namespaces, err := CreateNamespaces(f, numberOfCollections, fmt.Sprintf("density-%v", testArg.podsPerNode), testPhaseDurations.StartPhase(200, "namespace creation"))
             framework.ExpectNoError(err)

             configs := make([]testutils.RunObjectConfig, numberOfCollections)
@@ -567,7 +612,7 @@ var _ = SIGDescribe("Density", func() {
                 LogFunc: framework.Logf,
             })
         }
-        e2eStartupTime = runDensityTest(dConfig)
+        e2eStartupTime = runDensityTest(dConfig, testPhaseDurations)
         if itArg.runLatencyTest {
             By("Scheduling additional Pods to measure startup latencies")

@@ -650,6 +695,8 @@ var _ = SIGDescribe("Density", func() {
             }

             // Create some additional pods with throughput ~5 pods/sec.
+            latencyPodStartupPhase := testPhaseDurations.StartPhase(800, "latency pods creation")
+            defer latencyPodStartupPhase.End()
             var wg sync.WaitGroup
             wg.Add(nodeCount)
             // Explicitly set requests here.
@@ -673,7 +720,10 @@ var _ = SIGDescribe("Density", func() {
                 time.Sleep(200 * time.Millisecond)
             }
             wg.Wait()
+            latencyPodStartupPhase.End()

+            latencyMeasurementPhase := testPhaseDurations.StartPhase(810, "pod startup latencies measurement")
+            defer latencyMeasurementPhase.End()
             By("Waiting for all Pods begin observed by the watch...")
             waitTimeout := 10 * time.Minute
             for start := time.Now(); len(watchTimes) < nodeCount; time.Sleep(10 * time.Second) {
@@ -769,17 +819,20 @@ var _ = SIGDescribe("Density", func() {
             framework.ExpectNoError(framework.VerifyPodStartupLatency(podStartupLatency))

             framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c)
+            latencyMeasurementPhase.End()

             By("Removing additional replication controllers")
+            podDeletionPhase := testPhaseDurations.StartPhase(820, "latency pods deletion")
+            defer podDeletionPhase.End()
             deleteRC := func(i int) {
                 defer GinkgoRecover()
                 name := additionalPodsPrefix + "-" + strconv.Itoa(i+1)
                 framework.ExpectNoError(framework.DeleteRCAndWaitForGC(c, rcNameToNsMap[name], name))
             }
             workqueue.Parallelize(25, nodeCount, deleteRC)
+            podDeletionPhase.End()
         }
-        cleanupDensityTest(dConfig)
+        cleanupDensityTest(dConfig, testPhaseDurations)
     })
 }
})
@@ -43,6 +43,7 @@ import (
     "k8s.io/kubernetes/pkg/apis/extensions"
     "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/test/e2e/framework"
+    "k8s.io/kubernetes/test/e2e/framework/timer"
     testutils "k8s.io/kubernetes/test/utils"

     . "github.com/onsi/ginkgo"
@@ -88,6 +89,7 @@ var _ = SIGDescribe("Load capacity", func() {
     var configMapConfigs []*testutils.ConfigMapConfig

     testCaseBaseName := "load"
+    var testPhaseDurations *timer.TestPhaseTimer

     // Gathers metrics before teardown
     // TODO add flag that allows to skip cleanup on failure
@@ -96,8 +98,9 @@ var _ = SIGDescribe("Load capacity", func() {
         highLatencyRequests, metrics, err := framework.HighLatencyRequests(clientset, nodeCount)
         framework.ExpectNoError(err)
         if err == nil {
-            summaries := make([]framework.TestDataSummary, 0, 1)
+            summaries := make([]framework.TestDataSummary, 0, 2)
             summaries = append(summaries, metrics)
+            summaries = append(summaries, testPhaseDurations)
             framework.PrintSummaries(summaries, testCaseBaseName)
             Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
         }
@@ -123,6 +126,7 @@ var _ = SIGDescribe("Load capacity", func() {
     f.NamespaceDeletionTimeout = time.Hour

     BeforeEach(func() {
+        testPhaseDurations = timer.NewTestPhaseTimer()
         clientset = f.ClientSet

         ns = f.Namespace.Name
@@ -187,12 +191,14 @@ var _ = SIGDescribe("Load capacity", func() {
     It(name, func() {
         // Create a number of namespaces.
         namespaceCount := (nodeCount + nodeCountPerNamespace - 1) / nodeCountPerNamespace
-        namespaces, err := CreateNamespaces(f, namespaceCount, fmt.Sprintf("load-%v-nodepods", itArg.podsPerNode))
+        namespaces, err := CreateNamespaces(f, namespaceCount, fmt.Sprintf("load-%v-nodepods", itArg.podsPerNode), testPhaseDurations.StartPhase(110, "namespace creation"))
         framework.ExpectNoError(err)

         totalPods := (itArg.podsPerNode - itArg.daemonsPerNode) * nodeCount
         configs, secretConfigs, configMapConfigs = generateConfigs(totalPods, itArg.image, itArg.command, namespaces, itArg.kind, itArg.secretsPerPod, itArg.configMapsPerPod)

+        serviceCreationPhase := testPhaseDurations.StartPhase(120, "services creation")
+        defer serviceCreationPhase.End()
         if itArg.services {
             framework.Logf("Creating services")
             services := generateServicesForConfigs(configs)
@@ -204,6 +210,8 @@ var _ = SIGDescribe("Load capacity", func() {
             workqueue.Parallelize(serviceOperationsParallelism, len(services), createService)
             framework.Logf("%v Services created.", len(services))
             defer func(services []*v1.Service) {
+                serviceCleanupPhase := testPhaseDurations.StartPhase(800, "services deletion")
+                defer serviceCleanupPhase.End()
                 framework.Logf("Starting to delete services...")
                 deleteService := func(i int) {
                     defer GinkgoRecover()
@@ -216,17 +224,26 @@ var _ = SIGDescribe("Load capacity", func() {
         } else {
             framework.Logf("Skipping service creation")
         }
+        serviceCreationPhase.End()
         // Create all secrets.
+        secretsCreationPhase := testPhaseDurations.StartPhase(130, "secrets creation")
+        defer secretsCreationPhase.End()
         for i := range secretConfigs {
             secretConfigs[i].Run()
             defer secretConfigs[i].Stop()
         }
+        secretsCreationPhase.End()
         // Create all configmaps.
+        configMapsCreationPhase := testPhaseDurations.StartPhase(140, "configmaps creation")
+        defer configMapsCreationPhase.End()
         for i := range configMapConfigs {
             configMapConfigs[i].Run()
             defer configMapConfigs[i].Stop()
         }
-        // StartDeamon if needed
+        configMapsCreationPhase.End()
+        // StartDaemon if needed
+        daemonSetCreationPhase := testPhaseDurations.StartPhase(150, "daemonsets creation")
+        defer daemonSetCreationPhase.End()
         for i := 0; i < itArg.daemonsPerNode; i++ {
             daemonName := fmt.Sprintf("load-daemon-%v", i)
             daemonConfig := &testutils.DaemonConfig{
@@ -246,6 +263,7 @@ var _ = SIGDescribe("Load capacity", func() {
                 ))
             }(daemonConfig)
         }
+        daemonSetCreationPhase.End()

         // Simulate lifetime of RC:
         // * create with initial size
@@ -262,9 +280,10 @@ var _ = SIGDescribe("Load capacity", func() {
         // to make it possible to create/schedule them in the meantime.
         // Currently we assume <throughput> pods/second average throughput.
         // We may want to revisit it in the future.
-        framework.Logf("Starting to create ReplicationControllers...")
+        framework.Logf("Starting to create %v objects...", itArg.kind)
         creatingTime := time.Duration(totalPods/throughput) * time.Second
-        createAllResources(configs, creatingTime)
+        createAllResources(configs, creatingTime, testPhaseDurations.StartPhase(200, "load pods creation"))
         By("============================================================================")

         // We would like to spread scaling replication controllers over time
@@ -272,20 +291,20 @@ var _ = SIGDescribe("Load capacity", func() {
         // Currently we assume that <throughput> pods/second average throughput.
         // The expected number of created/deleted pods is less than totalPods/3.
         scalingTime := time.Duration(totalPods/(3*throughput)) * time.Second
-        framework.Logf("Starting to scale ReplicationControllers first time...")
-        scaleAllResources(configs, scalingTime)
+        framework.Logf("Starting to scale %v objects first time...", itArg.kind)
+        scaleAllResources(configs, scalingTime, testPhaseDurations.StartPhase(300, "scaling first time"))
         By("============================================================================")

-        framework.Logf("Starting to scale ReplicationControllers second time...")
-        scaleAllResources(configs, scalingTime)
+        framework.Logf("Starting to scale %v objects second time...", itArg.kind)
+        scaleAllResources(configs, scalingTime, testPhaseDurations.StartPhase(400, "scaling second time"))
         By("============================================================================")

         // Cleanup all created replication controllers.
         // Currently we assume <throughput> pods/second average deletion throughput.
         // We may want to revisit it in the future.
         deletingTime := time.Duration(totalPods/throughput) * time.Second
-        framework.Logf("Starting to delete ReplicationControllers...")
-        deleteAllResources(configs, deletingTime)
+        framework.Logf("Starting to delete %v objects...", itArg.kind)
+        deleteAllResources(configs, deletingTime, testPhaseDurations.StartPhase(500, "load pods deletion"))
     })
 }
})
@@ -534,7 +553,8 @@ func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.Conditio
     return wait.ExponentialBackoff(backoff, fn)
 }

-func createAllResources(configs []testutils.RunObjectConfig, creatingTime time.Duration) {
+func createAllResources(configs []testutils.RunObjectConfig, creatingTime time.Duration, testPhase *timer.Phase) {
+    defer testPhase.End()
     var wg sync.WaitGroup
     wg.Add(len(configs))
     for _, config := range configs {
@@ -551,7 +571,8 @@ func createResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, creati
     framework.ExpectNoError(config.Run(), fmt.Sprintf("creating %v %s", config.GetKind(), config.GetName()))
 }

-func scaleAllResources(configs []testutils.RunObjectConfig, scalingTime time.Duration) {
+func scaleAllResources(configs []testutils.RunObjectConfig, scalingTime time.Duration, testPhase *timer.Phase) {
+    defer testPhase.End()
     var wg sync.WaitGroup
     wg.Add(len(configs))
     for _, config := range configs {
@@ -592,7 +613,8 @@ func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scaling
     framework.ExpectNoError(err)
 }

-func deleteAllResources(configs []testutils.RunObjectConfig, deletingTime time.Duration) {
+func deleteAllResources(configs []testutils.RunObjectConfig, deletingTime time.Duration, testPhase *timer.Phase) {
+    defer testPhase.End()
     var wg sync.WaitGroup
     wg.Add(len(configs))
     for _, config := range configs {
@@ -617,7 +639,8 @@ func deleteResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, deleti
     }
 }

-func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix string) ([]*v1.Namespace, error) {
+func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix string, testPhase *timer.Phase) ([]*v1.Namespace, error) {
+    defer testPhase.End()
     namespaces := []*v1.Namespace{}
     for i := 1; i <= namespaceCount; i++ {
         namespace, err := f.CreateNamespace(fmt.Sprintf("%v-%d", namePrefix, i), nil)