Mirror of https://github.com/k3s-io/kubernetes.git
Introduce scheduler CPU/Memory profile-gathering in density test
commit 0c787703f5 (parent 457548ef7d)
@@ -64,6 +64,9 @@ var MaxContainerFailures = 0
 // Maximum no. of missing measurements related to pod-startup that the test tolerates.
 var MaxMissingPodStartupMeasurements = 0
 
+// Number of nodes in the cluster (computed inside BeforeEach).
+var nodeCount = 0
+
 type DensityTestConfig struct {
 	Configs    []testutils.RunObjectConfig
 	ClientSets []clientset.Interface
@@ -285,6 +288,11 @@ func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTi
 
 	replicationCtrlStartupPhase := testPhaseDurations.StartPhase(300, "saturation pods creation")
 	defer replicationCtrlStartupPhase.End()
+
+	// Start scheduler CPU profile-gatherer before we begin cluster saturation.
+	profileGatheringDelay := time.Duration(1+nodeCount/100) * time.Minute
+	schedulerProfilingStopCh := framework.StartCPUProfileGatherer("kube-scheduler", "density", profileGatheringDelay)
+
 	// Start all replication controllers.
 	startTime := time.Now()
 	wg := sync.WaitGroup{}
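Note on the hunk above: the gathering delay scales with cluster size, since time.Duration(1+nodeCount/100) * time.Minute works out to 1 minute below 100 nodes and, for example, 6 minutes on a 500-node cluster. The sketch below only illustrates the stop-channel contract the call relies on (a background goroutine that keeps collecting profiles until the returned channel is closed); it is not the actual framework.StartCPUProfileGatherer implementation, and gatherCPUProfile is a hypothetical stand-in for the real collection step.

// Illustrative sketch only; the real helper lives in the e2e framework and may differ.
package main

import (
	"fmt"
	"time"
)

// startCPUProfileGatherer starts a background goroutine that grabs a profile
// of the named component every delay until the returned channel is closed.
func startCPUProfileGatherer(component, test string, delay time.Duration) chan struct{} {
	stopCh := make(chan struct{})
	go func() {
		ticker := time.NewTicker(delay)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				gatherCPUProfile(component, test) // hypothetical collection step
			case <-stopCh:
				return
			}
		}
	}()
	return stopCh
}

// gatherCPUProfile stands in for fetching a CPU profile from the component
// and writing it to the test's report directory.
func gatherCPUProfile(component, test string) {
	fmt.Printf("gathered CPU profile of %s for test %q\n", component, test)
}

func main() {
	stop := startCPUProfileGatherer("kube-scheduler", "density", 50*time.Millisecond)
	time.Sleep(160 * time.Millisecond) // stands in for the cluster saturation phase
	close(stop)                        // mirrors close(schedulerProfilingStopCh) in the next hunk
}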
@@ -304,10 +312,16 @@ func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTi
 	wg.Wait()
 	startupTime := time.Since(startTime)
 	close(logStopCh)
+	close(schedulerProfilingStopCh)
 	framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
 	framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))
 	replicationCtrlStartupPhase.End()
 
+	// Grabbing scheduler memory profile after cluster saturation finished.
+	wg.Add(1)
+	framework.GatherMemoryProfile("kube-scheduler", "density", &wg)
+	wg.Wait()
+
 	printPodAllocationPhase := testPhaseDurations.StartPhase(400, "printing pod allocation")
 	defer printPodAllocationPhase.End()
 	// Print some data about Pod to Node allocation
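Note on the hunk above: framework.GatherMemoryProfile is handed a *sync.WaitGroup, so the test bumps the counter with wg.Add(1), lets the helper collect asynchronously, and blocks on wg.Wait() until the profile is in. Below is a minimal sketch of that calling convention, assuming the helper signals completion with exactly one Done(); gatherMemoryProfile here is a hypothetical stand-in, not the framework helper.

// Illustrative sketch only of the WaitGroup convention assumed above.
package main

import (
	"fmt"
	"sync"
)

// gatherMemoryProfile collects a memory profile of the component in the
// background and signals completion via wg.Done(); the caller must call
// wg.Add(1) beforehand, exactly as the test does in the hunk above.
func gatherMemoryProfile(component, test string, wg *sync.WaitGroup) {
	go func() {
		defer wg.Done()
		// Placeholder for the real heap-profile collection step.
		fmt.Printf("gathered memory profile of %s for test %q\n", component, test)
	}()
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	gatherMemoryProfile("kube-scheduler", "density", &wg)
	wg.Wait() // block until collection finishes before moving to the next phase
}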
@@ -366,7 +380,6 @@ func cleanupDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPha
 // limits on Docker's concurrent container startup.
 var _ = SIGDescribe("Density", func() {
 	var c clientset.Interface
-	var nodeCount int
 	var additionalPodsPrefix string
 	var ns string
 	var uuid string