From 115bd09ff94a3b66155d8c45686002f8f38791f0 Mon Sep 17 00:00:00 2001 From: Marcin Owsiany Date: Thu, 2 Nov 2017 12:46:15 +0100 Subject: [PATCH 1/2] Fix typo and progress messages. --- test/e2e/scalability/load.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/e2e/scalability/load.go b/test/e2e/scalability/load.go index 9cd5bd19df0..136d10e4985 100644 --- a/test/e2e/scalability/load.go +++ b/test/e2e/scalability/load.go @@ -226,7 +226,7 @@ var _ = SIGDescribe("Load capacity", func() { configMapConfigs[i].Run() defer configMapConfigs[i].Stop() } - // StartDeamon if needed + // StartDaemon if needed for i := 0; i < itArg.daemonsPerNode; i++ { daemonName := fmt.Sprintf("load-daemon-%v", i) daemonConfig := &testutils.DaemonConfig{ @@ -262,7 +262,7 @@ var _ = SIGDescribe("Load capacity", func() { // to make it possible to create/schedule them in the meantime. // Currently we assume pods/second average throughput. // We may want to revisit it in the future. - framework.Logf("Starting to create ReplicationControllers...") + framework.Logf("Starting to create %v objects...", itArg.kind) creatingTime := time.Duration(totalPods/throughput) * time.Second createAllResources(configs, creatingTime) By("============================================================================") @@ -272,11 +272,11 @@ var _ = SIGDescribe("Load capacity", func() { // Currently we assume that pods/second average throughput. // The expected number of created/deleted pods is less than totalPods/3. scalingTime := time.Duration(totalPods/(3*throughput)) * time.Second - framework.Logf("Starting to scale ReplicationControllers first time...") + framework.Logf("Starting to scale %v objects first time...", itArg.kind) scaleAllResources(configs, scalingTime) By("============================================================================") - framework.Logf("Starting to scale ReplicationControllers second time...") + framework.Logf("Starting to scale %v objects second time...", itArg.kind) scaleAllResources(configs, scalingTime) By("============================================================================") @@ -284,7 +284,7 @@ var _ = SIGDescribe("Load capacity", func() { // Currently we assume pods/second average deletion throughput. // We may want to revisit it in the future. deletingTime := time.Duration(totalPods/throughput) * time.Second - framework.Logf("Starting to delete ReplicationControllers...") + framework.Logf("Starting to delete %v objects...", itArg.kind) deleteAllResources(configs, deletingTime) }) } From 96089e0b7968c0964d4b1de016cda38f6f171d02 Mon Sep 17 00:00:00 2001 From: Marcin Owsiany Date: Thu, 2 Nov 2017 12:48:02 +0100 Subject: [PATCH 2/2] Add performance test phase timing export. 
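Adds a small test/e2e/framework/timer package that measures named, possibly
overlapping test phases and exports their durations both as human-readable text
and as perf-data JSON, so that perfdash can chart per-phase timings for the
density and load tests.

Illustrative usage sketch (the package name, wrapper function and doWork
placeholder below are arbitrary and only for illustration; the real call sites
are in density.go and load.go further down, and only the API introduced by this
patch - NewTestPhaseTimer, StartPhase and Phase.End - is used):

    package exampleuse // placeholder package name, illustration only

    import "k8s.io/kubernetes/test/e2e/framework/timer"

    // measuredStep shows the intended call pattern; doWork stands for whatever
    // the phase is supposed to measure.
    func measuredStep(testPhaseDurations *timer.TestPhaseTimer, doWork func()) {
    	phase := testPhaseDurations.StartPhase(100, "node preparation")
    	defer phase.End() // no-op if the phase has already been ended below
    	doWork()
    	phase.End() // records the end time as soon as the measured work finishes
    }

The timer is then appended to the test summaries and printed through
framework.PrintSummaries, which uses its SummaryKind, PrintHumanReadable and
PrintJSON methods.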
--- hack/.golint_failures | 1 + test/e2e/framework/BUILD | 1 + test/e2e/framework/timer/BUILD | 34 +++++++ test/e2e/framework/timer/timer.go | 126 +++++++++++++++++++++++++ test/e2e/framework/timer/timer_test.go | 92 ++++++++++++++++++ test/e2e/scalability/BUILD | 1 + test/e2e/scalability/density.go | 115 ++++++++++++++++------ test/e2e/scalability/load.go | 43 +++++++-- 8 files changed, 372 insertions(+), 41 deletions(-) create mode 100644 test/e2e/framework/timer/BUILD create mode 100644 test/e2e/framework/timer/timer.go create mode 100644 test/e2e/framework/timer/timer_test.go diff --git a/hack/.golint_failures b/hack/.golint_failures index beba119c910..bfb24dc7cad 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -760,6 +760,7 @@ test/e2e/chaosmonkey test/e2e/common test/e2e/framework test/e2e/framework/metrics +test/e2e/framework/timer test/e2e/instrumentation test/e2e/instrumentation/logging test/e2e/instrumentation/monitoring diff --git a/test/e2e/framework/BUILD b/test/e2e/framework/BUILD index ac6f876895d..65731422040 100644 --- a/test/e2e/framework/BUILD +++ b/test/e2e/framework/BUILD @@ -161,6 +161,7 @@ filegroup( ":package-srcs", "//test/e2e/framework/ginkgowrapper:all-srcs", "//test/e2e/framework/metrics:all-srcs", + "//test/e2e/framework/timer:all-srcs", ], tags = ["automanaged"], ) diff --git a/test/e2e/framework/timer/BUILD b/test/e2e/framework/timer/BUILD new file mode 100644 index 00000000000..afdc516b975 --- /dev/null +++ b/test/e2e/framework/timer/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["timer.go"], + importpath = "k8s.io/kubernetes/test/e2e/framework/timer", + visibility = ["//visibility:public"], + deps = [ + "//test/e2e/framework:go_default_library", + "//test/e2e/perftype:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["timer_test.go"], + importpath = "k8s.io/kubernetes/test/e2e/framework/timer", + library = ":go_default_library", + deps = ["//vendor/github.com/onsi/gomega:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/test/e2e/framework/timer/timer.go b/test/e2e/framework/timer/timer.go new file mode 100644 index 00000000000..37769cb3449 --- /dev/null +++ b/test/e2e/framework/timer/timer.go @@ -0,0 +1,126 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package timer + +import ( + "time" + + "bytes" + "fmt" + + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/perftype" + "sync" +) + +var now = time.Now + +// Represents a phase of a test. Phases can overlap. 
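+// A Phase is obtained from TestPhaseTimer.StartPhase and runs until End is called;
+// a zero endTime means the phase has not ended yet. End may be called more than once
+// (typically once explicitly and once via defer) - only the first call records the
+// end time, so the deferred call is a harmless safety net.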
+type Phase struct { + sequenceNumber int + name string + startTime time.Time + endTime time.Time +} + +func (phase *Phase) ended() bool { + return !phase.endTime.IsZero() +} + +// End marks the phase as ended, unless it had already been ended before. +func (phase *Phase) End() { + if !phase.ended() { + phase.endTime = now() + } +} + +func (phase *Phase) label() string { + return fmt.Sprintf("%03d-%s", phase.sequenceNumber, phase.name) +} + +func (phase *Phase) duration() time.Duration { + endTime := phase.endTime + if !phase.ended() { + endTime = now() + } + return endTime.Sub(phase.startTime) +} + +func (phase *Phase) humanReadable() string { + if phase.ended() { + return fmt.Sprintf("Phase %s: %v\n", phase.label(), phase.duration()) + } else { + return fmt.Sprintf("Phase %s: %v so far\n", phase.label(), phase.duration()) + } +} + +// A TestPhaseTimer groups phases and provides a way to export their measurements as JSON or human-readable text. +// It is safe to use concurrently. +type TestPhaseTimer struct { + lock sync.Mutex + phases []*Phase +} + +// NewTestPhaseTimer creates a new TestPhaseTimer. +func NewTestPhaseTimer() *TestPhaseTimer { + return &TestPhaseTimer{} +} + +// StartPhase starts a new phase. +// sequenceNumber is an integer prepended to phaseName in the output, such that lexicographic sorting +// of phases in perfdash reconstructs the order of execution. Unfortunately it needs to be +// provided manually, since a simple incrementing counter would have the effect that inserting +// a new phase would renumber subsequent phases, breaking the continuity of historical records. +func (timer *TestPhaseTimer) StartPhase(sequenceNumber int, phaseName string) *Phase { + timer.lock.Lock() + defer timer.lock.Unlock() + newPhase := &Phase{sequenceNumber: sequenceNumber, name: phaseName, startTime: now()} + timer.phases = append(timer.phases, newPhase) + return newPhase +} + +func (timer *TestPhaseTimer) SummaryKind() string { + return "TestPhaseTimer" +} + +func (timer *TestPhaseTimer) PrintHumanReadable() string { + buf := bytes.Buffer{} + timer.lock.Lock() + defer timer.lock.Unlock() + for _, phase := range timer.phases { + buf.WriteString(phase.humanReadable()) + } + return buf.String() +} + +func (timer *TestPhaseTimer) PrintJSON() string { + data := perftype.PerfData{ + Version: "v1", + DataItems: []perftype.DataItem{{ + Unit: "s", + Labels: map[string]string{"test": "phases"}, + Data: make(map[string]float64)}}} + timer.lock.Lock() + defer timer.lock.Unlock() + for _, phase := range timer.phases { + data.DataItems[0].Data[phase.label()] = phase.duration().Seconds() + if !phase.ended() { + data.DataItems[0].Labels["ended"] = "false" + } + } + return framework.PrettyPrintJSON(data) +} diff --git a/test/e2e/framework/timer/timer_test.go b/test/e2e/framework/timer/timer_test.go new file mode 100644 index 00000000000..d3dd13c9e9a --- /dev/null +++ b/test/e2e/framework/timer/timer_test.go @@ -0,0 +1,92 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package timer + +import ( + "testing" + "time" + + . "github.com/onsi/gomega" +) + +var currentTime time.Time + +func init() { + setCurrentTimeSinceEpoch(0) + now = func() time.Time { return currentTime } +} + +func setCurrentTimeSinceEpoch(duration time.Duration) { + currentTime = time.Unix(0, duration.Nanoseconds()) +} + +func testUsageWithDefer(timer *TestPhaseTimer) { + defer timer.StartPhase(33, "two").End() + setCurrentTimeSinceEpoch(6*time.Second + 500*time.Millisecond) +} + +func TestTimer(t *testing.T) { + RegisterTestingT(t) + + timer := NewTestPhaseTimer() + setCurrentTimeSinceEpoch(1 * time.Second) + phaseOne := timer.StartPhase(1, "one") + setCurrentTimeSinceEpoch(3 * time.Second) + testUsageWithDefer(timer) + + Expect(timer.PrintJSON()).To(MatchJSON(`{ + "version": "v1", + "dataItems": [ + { + "data": { + "001-one": 5.5, + "033-two": 3.5 + }, + "unit": "s", + "labels": { + "test": "phases", + "ended": "false" + } + } + ] + }`)) + Expect(timer.PrintHumanReadable()).To(Equal(`Phase 001-one: 5.5s so far +Phase 033-two: 3.5s +`)) + + setCurrentTimeSinceEpoch(7*time.Second + 500*time.Millisecond) + phaseOne.End() + + Expect(timer.PrintJSON()).To(MatchJSON(`{ + "version": "v1", + "dataItems": [ + { + "data": { + "001-one": 6.5, + "033-two": 3.5 + }, + "unit": "s", + "labels": { + "test": "phases" + } + } + ] + }`)) + Expect(timer.PrintHumanReadable()).To(Equal(`Phase 001-one: 6.5s +Phase 033-two: 3.5s +`)) +} diff --git a/test/e2e/scalability/BUILD b/test/e2e/scalability/BUILD index decb9a2b9a3..a7027db29c6 100644 --- a/test/e2e/scalability/BUILD +++ b/test/e2e/scalability/BUILD @@ -20,6 +20,7 @@ go_library( "//pkg/apis/extensions:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/timer:go_default_library", "//test/utils:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", diff --git a/test/e2e/scalability/density.go b/test/e2e/scalability/density.go index 3f906d88bd5..c2ebafe264b 100644 --- a/test/e2e/scalability/density.go +++ b/test/e2e/scalability/density.go @@ -44,6 +44,7 @@ import ( "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/timer" testutils "k8s.io/kubernetes/test/utils" . 
"github.com/onsi/ginkgo" @@ -76,6 +77,54 @@ type DensityTestConfig struct { DaemonConfigs []*testutils.DaemonConfig } +func (dtc *DensityTestConfig) runSecretConfigs(testPhase *timer.Phase) { + defer testPhase.End() + for _, sc := range dtc.SecretConfigs { + sc.Run() + } +} + +func (dtc *DensityTestConfig) runConfigMapConfigs(testPhase *timer.Phase) { + defer testPhase.End() + for _, cmc := range dtc.ConfigMapConfigs { + cmc.Run() + } +} + +func (dtc *DensityTestConfig) runDaemonConfigs(testPhase *timer.Phase) { + defer testPhase.End() + for _, dc := range dtc.DaemonConfigs { + dc.Run() + } +} + +func (dtc *DensityTestConfig) deleteSecrets(testPhase *timer.Phase) { + defer testPhase.End() + for i := range dtc.SecretConfigs { + dtc.SecretConfigs[i].Stop() + } +} + +func (dtc *DensityTestConfig) deleteConfigMaps(testPhase *timer.Phase) { + defer testPhase.End() + for i := range dtc.ConfigMapConfigs { + dtc.ConfigMapConfigs[i].Stop() + } +} + +func (dtc *DensityTestConfig) deleteDaemonSets(numberOfClients int, testPhase *timer.Phase) { + defer testPhase.End() + for i := range dtc.DaemonConfigs { + framework.ExpectNoError(framework.DeleteResourceAndPods( + dtc.ClientSets[i%numberOfClients], + dtc.InternalClientsets[i%numberOfClients], + extensions.Kind("DaemonSet"), + dtc.DaemonConfigs[i].Namespace, + dtc.DaemonConfigs[i].Name, + )) + } +} + func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint { var apiserverMem uint64 var controllerMem uint64 @@ -196,20 +245,16 @@ func logPodStartupStatus(c clientset.Interface, expectedPods int, observedLabels // runDensityTest will perform a density test and return the time it took for // all pods to start -func runDensityTest(dtc DensityTestConfig) time.Duration { +func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer) time.Duration { defer GinkgoRecover() // Create all secrets, configmaps and daemons. - for i := range dtc.SecretConfigs { - dtc.SecretConfigs[i].Run() - } - for i := range dtc.ConfigMapConfigs { - dtc.ConfigMapConfigs[i].Run() - } - for i := range dtc.DaemonConfigs { - dtc.DaemonConfigs[i].Run() - } + dtc.runSecretConfigs(testPhaseDurations.StartPhase(250, "secrets creation")) + dtc.runConfigMapConfigs(testPhaseDurations.StartPhase(260, "configmaps creation")) + dtc.runDaemonConfigs(testPhaseDurations.StartPhase(270, "daemonsets creation")) + replicationCtrlStartupPhase := testPhaseDurations.StartPhase(300, "saturation pods creation") + defer replicationCtrlStartupPhase.End() // Start all replication controllers. 
startTime := time.Now() wg := sync.WaitGroup{} @@ -231,7 +276,10 @@ func runDensityTest(dtc DensityTestConfig) time.Duration { close(logStopCh) framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime) framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second)) + replicationCtrlStartupPhase.End() + printPodAllocationPhase := testPhaseDurations.StartPhase(400, "printing pod allocation") + defer printPodAllocationPhase.End() // Print some data about Pod to Node allocation By("Printing Pod to Node allocation data") podList, err := dtc.ClientSets[0].CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) @@ -253,11 +301,14 @@ func runDensityTest(dtc DensityTestConfig) time.Duration { for _, node := range nodeNames { framework.Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node]) } + defer printPodAllocationPhase.End() return startupTime } -func cleanupDensityTest(dtc DensityTestConfig) { +func cleanupDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer) { defer GinkgoRecover() + podCleanupPhase := testPhaseDurations.StartPhase(900, "latency pods deletion") + defer podCleanupPhase.End() By("Deleting created Collections") numberOfClients := len(dtc.ClientSets) // We explicitly delete all pods to have API calls necessary for deletion accounted in metrics. @@ -275,23 +326,11 @@ func cleanupDensityTest(dtc DensityTestConfig) { framework.ExpectNoError(err) } } + podCleanupPhase.End() - // Delete all secrets, configmaps and daemons. - for i := range dtc.SecretConfigs { - dtc.SecretConfigs[i].Stop() - } - for i := range dtc.ConfigMapConfigs { - dtc.ConfigMapConfigs[i].Stop() - } - for i := range dtc.DaemonConfigs { - framework.ExpectNoError(framework.DeleteResourceAndPods( - dtc.ClientSets[i%numberOfClients], - dtc.InternalClientsets[i%numberOfClients], - extensions.Kind("DaemonSet"), - dtc.DaemonConfigs[i].Namespace, - dtc.DaemonConfigs[i].Name, - )) - } + dtc.deleteSecrets(testPhaseDurations.StartPhase(910, "secrets deletion")) + dtc.deleteConfigMaps(testPhaseDurations.StartPhase(920, "configmaps deletion")) + dtc.deleteDaemonSets(numberOfClients, testPhaseDurations.StartPhase(930, "daemonsets deletion")) } // This test suite can take a long time to run, and can affect or be affected by other tests. 
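+// How the phase durations reach the test output (all calls below appear in this patch):
+// the TestPhaseTimer created in BeforeEach satisfies the framework.TestDataSummary
+// interface via SummaryKind, PrintHumanReadable and PrintJSON, so appending it to the
+// summaries in AfterEach is enough for framework.PrintSummaries to emit both the
+// human-readable phase list and the perf-data JSON that perfdash charts:
+//
+//	testPhaseDurations = timer.NewTestPhaseTimer()        // BeforeEach
+//	summaries = append(summaries, testPhaseDurations)     // AfterEach
+//	framework.PrintSummaries(summaries, testCaseBaseName)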
@@ -316,6 +355,7 @@ var _ = SIGDescribe("Density", func() { testCaseBaseName := "density" missingMeasurements := 0 + var testPhaseDurations *timer.TestPhaseTimer // Gathers data prior to framework namespace teardown AfterEach(func() { @@ -349,6 +389,7 @@ var _ = SIGDescribe("Density", func() { if err == nil { summaries = append(summaries, latency) } + summaries = append(summaries, testPhaseDurations) framework.PrintSummaries(summaries, testCaseBaseName) @@ -368,6 +409,7 @@ var _ = SIGDescribe("Density", func() { BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name + testPhaseDurations = timer.NewTestPhaseTimer() masters, nodes = framework.GetMasterAndWorkerNodesOrDie(c) nodeCount = len(nodes.Items) @@ -454,6 +496,8 @@ var _ = SIGDescribe("Density", func() { ) itArg := testArg It(name, func() { + nodePrepPhase := testPhaseDurations.StartPhase(100, "node preparation") + defer nodePrepPhase.End() nodePreparer := framework.NewE2ETestNodePreparer( f.ClientSet, []testutils.CountToStrategy{{Count: nodeCount, Strategy: &testutils.TrivialNodePrepareStrategy{}}}, @@ -469,10 +513,11 @@ var _ = SIGDescribe("Density", func() { fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid)) framework.ExpectNoError(err) defer fileHndl.Close() + nodePrepPhase.End() // nodeCountPerNamespace and CreateNamespaces are defined in load.go numberOfCollections := (nodeCount + nodeCountPerNamespace - 1) / nodeCountPerNamespace - namespaces, err := CreateNamespaces(f, numberOfCollections, fmt.Sprintf("density-%v", testArg.podsPerNode)) + namespaces, err := CreateNamespaces(f, numberOfCollections, fmt.Sprintf("density-%v", testArg.podsPerNode), testPhaseDurations.StartPhase(200, "namespace creation")) framework.ExpectNoError(err) configs := make([]testutils.RunObjectConfig, numberOfCollections) @@ -567,7 +612,7 @@ var _ = SIGDescribe("Density", func() { LogFunc: framework.Logf, }) } - e2eStartupTime = runDensityTest(dConfig) + e2eStartupTime = runDensityTest(dConfig, testPhaseDurations) if itArg.runLatencyTest { By("Scheduling additional Pods to measure startup latencies") @@ -650,6 +695,8 @@ var _ = SIGDescribe("Density", func() { } // Create some additional pods with throughput ~5 pods/sec. + latencyPodStartupPhase := testPhaseDurations.StartPhase(800, "latency pods creation") + defer latencyPodStartupPhase.End() var wg sync.WaitGroup wg.Add(nodeCount) // Explicitly set requests here. 
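+// A note on the sequence numbers used for phases in this file (100, 200, 250-270, 300,
+// 400, 800-820, 900-930): StartPhase zero-pads the number into the phase label
+// ("%03d-%s"), so sorting the labels lexicographically in perfdash reproduces the
+// execution order, and the gaps leave room to add new phases later without renumbering
+// existing ones and breaking the continuity of their historical records. For example:
+//
+//	testPhaseDurations.StartPhase(100, "node preparation")  // exported as "100-node preparation"
+//	testPhaseDurations.StartPhase(810, "pod startup latencies measurement")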
@@ -673,7 +720,10 @@ var _ = SIGDescribe("Density", func() { time.Sleep(200 * time.Millisecond) } wg.Wait() + latencyPodStartupPhase.End() + latencyMeasurementPhase := testPhaseDurations.StartPhase(810, "pod startup latencies measurement") + defer latencyMeasurementPhase.End() By("Waiting for all Pods begin observed by the watch...") waitTimeout := 10 * time.Minute for start := time.Now(); len(watchTimes) < nodeCount; time.Sleep(10 * time.Second) { @@ -769,17 +819,20 @@ var _ = SIGDescribe("Density", func() { framework.ExpectNoError(framework.VerifyPodStartupLatency(podStartupLatency)) framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c) + latencyMeasurementPhase.End() By("Removing additional replication controllers") + podDeletionPhase := testPhaseDurations.StartPhase(820, "latency pods deletion") + defer podDeletionPhase.End() deleteRC := func(i int) { defer GinkgoRecover() name := additionalPodsPrefix + "-" + strconv.Itoa(i+1) framework.ExpectNoError(framework.DeleteRCAndWaitForGC(c, rcNameToNsMap[name], name)) } workqueue.Parallelize(25, nodeCount, deleteRC) + podDeletionPhase.End() } - - cleanupDensityTest(dConfig) + cleanupDensityTest(dConfig, testPhaseDurations) }) } }) diff --git a/test/e2e/scalability/load.go b/test/e2e/scalability/load.go index 136d10e4985..8b7f3405f36 100644 --- a/test/e2e/scalability/load.go +++ b/test/e2e/scalability/load.go @@ -43,6 +43,7 @@ import ( "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/timer" testutils "k8s.io/kubernetes/test/utils" . "github.com/onsi/ginkgo" @@ -88,6 +89,7 @@ var _ = SIGDescribe("Load capacity", func() { var configMapConfigs []*testutils.ConfigMapConfig testCaseBaseName := "load" + var testPhaseDurations *timer.TestPhaseTimer // Gathers metrics before teardown // TODO add flag that allows to skip cleanup on failure @@ -96,8 +98,9 @@ var _ = SIGDescribe("Load capacity", func() { highLatencyRequests, metrics, err := framework.HighLatencyRequests(clientset, nodeCount) framework.ExpectNoError(err) if err == nil { - summaries := make([]framework.TestDataSummary, 0, 1) + summaries := make([]framework.TestDataSummary, 0, 2) summaries = append(summaries, metrics) + summaries = append(summaries, testPhaseDurations) framework.PrintSummaries(summaries, testCaseBaseName) Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests") } @@ -123,6 +126,7 @@ var _ = SIGDescribe("Load capacity", func() { f.NamespaceDeletionTimeout = time.Hour BeforeEach(func() { + testPhaseDurations = timer.NewTestPhaseTimer() clientset = f.ClientSet ns = f.Namespace.Name @@ -187,12 +191,14 @@ var _ = SIGDescribe("Load capacity", func() { It(name, func() { // Create a number of namespaces. 
namespaceCount := (nodeCount + nodeCountPerNamespace - 1) / nodeCountPerNamespace - namespaces, err := CreateNamespaces(f, namespaceCount, fmt.Sprintf("load-%v-nodepods", itArg.podsPerNode)) + namespaces, err := CreateNamespaces(f, namespaceCount, fmt.Sprintf("load-%v-nodepods", itArg.podsPerNode), testPhaseDurations.StartPhase(110, "namespace creation")) framework.ExpectNoError(err) totalPods := (itArg.podsPerNode - itArg.daemonsPerNode) * nodeCount configs, secretConfigs, configMapConfigs = generateConfigs(totalPods, itArg.image, itArg.command, namespaces, itArg.kind, itArg.secretsPerPod, itArg.configMapsPerPod) + serviceCreationPhase := testPhaseDurations.StartPhase(120, "services creation") + defer serviceCreationPhase.End() if itArg.services { framework.Logf("Creating services") services := generateServicesForConfigs(configs) @@ -204,6 +210,8 @@ var _ = SIGDescribe("Load capacity", func() { workqueue.Parallelize(serviceOperationsParallelism, len(services), createService) framework.Logf("%v Services created.", len(services)) defer func(services []*v1.Service) { + serviceCleanupPhase := testPhaseDurations.StartPhase(800, "services deletion") + defer serviceCleanupPhase.End() framework.Logf("Starting to delete services...") deleteService := func(i int) { defer GinkgoRecover() @@ -216,17 +224,26 @@ var _ = SIGDescribe("Load capacity", func() { } else { framework.Logf("Skipping service creation") } + serviceCreationPhase.End() // Create all secrets. + secretsCreationPhase := testPhaseDurations.StartPhase(130, "secrets creation") + defer secretsCreationPhase.End() for i := range secretConfigs { secretConfigs[i].Run() defer secretConfigs[i].Stop() } + secretsCreationPhase.End() // Create all configmaps. + configMapsCreationPhase := testPhaseDurations.StartPhase(140, "configmaps creation") + defer configMapsCreationPhase.End() for i := range configMapConfigs { configMapConfigs[i].Run() defer configMapConfigs[i].Stop() } + configMapsCreationPhase.End() // StartDaemon if needed + daemonSetCreationPhase := testPhaseDurations.StartPhase(150, "daemonsets creation") + defer daemonSetCreationPhase.End() for i := 0; i < itArg.daemonsPerNode; i++ { daemonName := fmt.Sprintf("load-daemon-%v", i) daemonConfig := &testutils.DaemonConfig{ @@ -246,6 +263,7 @@ var _ = SIGDescribe("Load capacity", func() { )) }(daemonConfig) } + daemonSetCreationPhase.End() // Simulate lifetime of RC: // * create with initial size @@ -264,7 +282,8 @@ var _ = SIGDescribe("Load capacity", func() { // We may want to revisit it in the future. framework.Logf("Starting to create %v objects...", itArg.kind) creatingTime := time.Duration(totalPods/throughput) * time.Second - createAllResources(configs, creatingTime) + + createAllResources(configs, creatingTime, testPhaseDurations.StartPhase(200, "load pods creation")) By("============================================================================") // We would like to spread scaling replication controllers over time @@ -273,11 +292,11 @@ var _ = SIGDescribe("Load capacity", func() { // The expected number of created/deleted pods is less than totalPods/3. 
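+			// Timing note: createAllResources, scaleAllResources, deleteAllResources and
+			// CreateNamespaces now take the phase to measure as their last argument and close
+			// it with "defer testPhase.End()", so each exported phase covers exactly one of
+			// the create/scale/delete steps driven below.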
scalingTime := time.Duration(totalPods/(3*throughput)) * time.Second framework.Logf("Starting to scale %v objects first time...", itArg.kind) - scaleAllResources(configs, scalingTime) + scaleAllResources(configs, scalingTime, testPhaseDurations.StartPhase(300, "scaling first time")) By("============================================================================") framework.Logf("Starting to scale %v objects second time...", itArg.kind) - scaleAllResources(configs, scalingTime) + scaleAllResources(configs, scalingTime, testPhaseDurations.StartPhase(400, "scaling second time")) By("============================================================================") // Cleanup all created replication controllers. @@ -285,7 +304,7 @@ var _ = SIGDescribe("Load capacity", func() { // We may want to revisit it in the future. deletingTime := time.Duration(totalPods/throughput) * time.Second framework.Logf("Starting to delete %v objects...", itArg.kind) - deleteAllResources(configs, deletingTime) + deleteAllResources(configs, deletingTime, testPhaseDurations.StartPhase(500, "load pods deletion")) }) } }) @@ -534,7 +553,8 @@ func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.Conditio return wait.ExponentialBackoff(backoff, fn) } -func createAllResources(configs []testutils.RunObjectConfig, creatingTime time.Duration) { +func createAllResources(configs []testutils.RunObjectConfig, creatingTime time.Duration, testPhase *timer.Phase) { + defer testPhase.End() var wg sync.WaitGroup wg.Add(len(configs)) for _, config := range configs { @@ -551,7 +571,8 @@ func createResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, creati framework.ExpectNoError(config.Run(), fmt.Sprintf("creating %v %s", config.GetKind(), config.GetName())) } -func scaleAllResources(configs []testutils.RunObjectConfig, scalingTime time.Duration) { +func scaleAllResources(configs []testutils.RunObjectConfig, scalingTime time.Duration, testPhase *timer.Phase) { + defer testPhase.End() var wg sync.WaitGroup wg.Add(len(configs)) for _, config := range configs { @@ -592,7 +613,8 @@ func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scaling framework.ExpectNoError(err) } -func deleteAllResources(configs []testutils.RunObjectConfig, deletingTime time.Duration) { +func deleteAllResources(configs []testutils.RunObjectConfig, deletingTime time.Duration, testPhase *timer.Phase) { + defer testPhase.End() var wg sync.WaitGroup wg.Add(len(configs)) for _, config := range configs { @@ -617,7 +639,8 @@ func deleteResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, deleti } } -func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix string) ([]*v1.Namespace, error) { +func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix string, testPhase *timer.Phase) ([]*v1.Namespace, error) { + defer testPhase.End() namespaces := []*v1.Namespace{} for i := 1; i <= namespaceCount; i++ { namespace, err := f.CreateNamespace(fmt.Sprintf("%v-%d", namePrefix, i), nil)