Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-29 06:27:05 +00:00)

Merge pull request #35964 from wojtek-t/fix_large_density_test

Fix density test in large clusters

Commit 63954ccd0e
@@ -57,7 +57,6 @@ type DensityTestConfig struct {
 	ClientSet    internalclientset.Interface
 	PollInterval time.Duration
 	PodCount     int
-	Timeout      time.Duration
 }
 
 func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
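The shared Timeout field is dropped because one deadline no longer fits every caller: the per-RC timeout is computed and carried on each testutils.RCConfig instead (see the @@ -397 and @@ -406 hunks below). A minimal sketch of the resulting shape, assuming only the fields visible in this diff and with concrete types elided:

package sketch // illustrative only, not the test's real package

import "time"

// rcConfig stands in for testutils.RCConfig; the real type has many
// more fields. Timeout now lives here, set per RC.
type rcConfig struct {
	Timeout time.Duration
}

// densityTestConfig mirrors DensityTestConfig after this hunk: no
// shared Timeout, each entry in Configs carries its own.
type densityTestConfig struct {
	Configs      []rcConfig
	PollInterval time.Duration
	PodCount     int
}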
@@ -193,8 +192,10 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
 		rcConfig := dtc.Configs[i]
 		go func() {
 			defer GinkgoRecover()
+			// Call wg.Done() in defer to avoid blocking whole test
+			// in case of error from RunRC.
+			defer wg.Done()
 			framework.ExpectNoError(framework.RunRC(rcConfig))
-			wg.Done()
 		}()
 	}
 	logStopCh := make(chan struct{})
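framework.ExpectNoError fails the spec by panicking (which GinkgoRecover catches), so when RunRC returned an error the old trailing wg.Done() never ran and wg.Wait() blocked the whole test. Deferring wg.Done() decrements the counter on every exit path. A standalone sketch of the pattern, with expectNoError and the recover stand-in mimicking the framework's behavior rather than using its real API:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// expectNoError panics on a non-nil error, mimicking how
// framework.ExpectNoError fails a Ginkgo spec.
func expectNoError(err error) {
	if err != nil {
		panic(err)
	}
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		i := i
		go func() {
			defer func() { _ = recover() }() // stands in for defer GinkgoRecover()
			// Decrement in defer so a panic inside expectNoError cannot
			// leave wg.Wait() blocked forever.
			defer wg.Done()
			var err error
			if i == 1 {
				err = errors.New("RunRC failed") // one simulated failure
			}
			expectNoError(err)
		}()
	}
	wg.Wait() // returns even though one goroutine panicked
	fmt.Println("all goroutines accounted for")
}

Replacing the defer with a trailing wg.Done() after expectNoError(err), as the old code did, makes this sketch deadlock on wg.Wait().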
@@ -389,7 +390,6 @@ var _ = framework.KubeDescribe("Density", func() {
 			fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
 			framework.ExpectNoError(err)
 			defer fileHndl.Close()
-			timeout := 10 * time.Minute
 
 			// nodeCountPerNamespace and CreateNamespaces are defined in load.go
 			numberOfRCs := (nodeCount + nodeCountPerNamespace - 1) / nodeCountPerNamespace
@@ -397,6 +397,10 @@ var _ = framework.KubeDescribe("Density", func() {
 			framework.ExpectNoError(err)
 
 			RCConfigs := make([]testutils.RCConfig, numberOfRCs)
+			// Since all RCs are created at the same time, timeout for each config
+			// has to assume that it will be run at the very end.
+			podThroughput := 20
+			timeout := time.Duration(totalPods/podThroughput)*time.Second + 3*time.Minute
 			for i := 0; i < numberOfRCs; i++ {
 				RCName := fmt.Sprintf("density%v-%v-%v", totalPods, i, uuid)
 				nsName := namespaces[i].Name
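All RCs are launched concurrently, but pods are created at a roughly fixed cluster-wide rate, so any one RC may only finish near the very end of the run; the flat 10-minute deadline removed in the @@ -389 hunk above is too short at scale. The new timeout scales with the total pod count at an assumed throughput of 20 pods/sec, plus a flat buffer. A small sketch of the arithmetic (the pod counts are illustrative, not taken from the diff):

package main

import (
	"fmt"
	"time"
)

// rcTimeout mirrors the expression added in this hunk: time for the
// whole run at podThroughput pods/sec, plus a three-minute buffer.
func rcTimeout(totalPods, podThroughput int) time.Duration {
	return time.Duration(totalPods/podThroughput)*time.Second + 3*time.Minute
}

func main() {
	for _, totalPods := range []int{3000, 30000, 60000} {
		fmt.Printf("%6d pods -> %v\n", totalPods, rcTimeout(totalPods, 20))
	}
	// 3000 pods  -> 5m30s
	// 30000 pods -> 28m0s
	// 60000 pods -> 53m0s
}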
@@ -406,6 +410,7 @@ var _ = framework.KubeDescribe("Density", func() {
 					Namespace:     nsName,
 					Labels:        map[string]string{"type": "densityPod"},
 					PollInterval:  itArg.interval,
+					Timeout:       timeout,
 					PodStatusFile: fileHndl,
 					Replicas:      (totalPods + numberOfRCs - 1) / numberOfRCs,
 					CpuRequest:    nodeCpuCapacity / 100,
@@ -420,7 +425,6 @@ var _ = framework.KubeDescribe("Density", func() {
 				Configs:      RCConfigs,
 				PodCount:     totalPods,
 				PollInterval: itArg.interval,
-				Timeout:      timeout,
 			}
 			e2eStartupTime = runDensityTest(dConfig)
 			if itArg.runLatencyTest {
@@ -530,8 +534,9 @@ var _ = framework.KubeDescribe("Density", func() {
 				wg.Wait()
 
 				By("Waiting for all Pods begin observed by the watch...")
+				waitTimeout := 10 * time.Minute
 				for start := time.Now(); len(watchTimes) < nodeCount; time.Sleep(10 * time.Second) {
-					if time.Since(start) < timeout {
+					if time.Since(start) < waitTimeout {
 						framework.Failf("Timeout reached waiting for all Pods being observed by the watch.")
 					}
 				}
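With timeout now scaled to pod count, the watch-observation wait keeps its own fixed 10-minute bound under the new name waitTimeout rather than silently inheriting the scaled value. A sketch of the same poll-until-deadline loop as a reusable helper (names are illustrative, not framework API; note the sketch reports failure once the deadline is actually exceeded, i.e. time.Since(start) > waitTimeout):

package main

import (
	"fmt"
	"time"
)

// waitForCount polls observed() every interval until it reaches want,
// failing once more than waitTimeout has elapsed.
func waitForCount(observed func() int, want int, interval, waitTimeout time.Duration) error {
	for start := time.Now(); observed() < want; time.Sleep(interval) {
		if time.Since(start) > waitTimeout {
			return fmt.Errorf("timed out after %v: observed %d of %d", waitTimeout, observed(), want)
		}
	}
	return nil
}

func main() {
	n := 0
	err := waitForCount(func() int { n++; return n }, 3, 10*time.Millisecond, time.Second)
	fmt.Println(err) // <nil>: the counter reaches 3 well before the deadline
}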
@@ -671,7 +676,6 @@ var _ = framework.KubeDescribe("Density", func() {
 				Configs:      RCConfigs,
 				PodCount:     totalPods,
 				PollInterval: 10 * time.Second,
-				Timeout:      10 * time.Minute,
 			}
 			e2eStartupTime = runDensityTest(dConfig)
 			cleanupDensityTest(dConfig)