Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-27 13:37:30 +00:00
Merge pull request #42507 from crassirostris/cluster-logging-tests-uniform-load-distribution
Automatic merge from submit-queue (batch tested with PRs 43149, 41399, 43154, 43569, 42507)

Distribute load in cluster load tests uniformly

This PR makes the cluster logging load tests distribute logging pods uniformly across nodes, to avoid the situation where 80% of the pods end up on one node and the overall results are worse than they could be.
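The core of the change is a round-robin assignment of logging pods to nodes, so each node hosts roughly podCount/nodeCount generators instead of whatever the scheduler happens to pick. A minimal standalone sketch of that idea (node names and counts here are illustrative, not taken from the diff):

package main

import "fmt"

func main() {
	// Hypothetical list of schedulable nodes; the test obtains the real list
	// from framework.GetReadySchedulableNodesOrDie.
	nodes := []string{"node-a", "node-b", "node-c"}
	podCount := 7

	for podIdx := 0; podIdx < podCount; podIdx++ {
		// Same modulo trick the diff uses: pod i goes to node i % len(nodes).
		node := nodes[podIdx%len(nodes)]
		fmt.Printf("pod %d -> %s\n", podIdx, node)
	}
}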
This commit is contained in: commit 8d7ba2bea2
@@ -48,7 +48,7 @@ var _ = framework.KubeDescribe("Cluster level logging using Elasticsearch [Featu
 	framework.ExpectNoError(err, "Fluentd deployed incorrectly")
 
 	By("Running synthetic logger")
-	pod := createLoggingPod(f, podName, 10*60, 10*time.Minute)
+	pod := createLoggingPod(f, podName, "", 10*60, 10*time.Minute)
 	defer f.PodClient().Delete(podName, &meta_v1.DeleteOptions{})
 	err = framework.WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
 	framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to be running", podName))
@@ -46,7 +46,7 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL", func() {
 	framework.ExpectNoError(err, "Fluentd deployed incorrectly")
 
 	By("Running synthetic logger")
-	pod := createLoggingPod(f, podName, 10*60, 10*time.Minute)
+	pod := createLoggingPod(f, podName, "", 10*60, 10*time.Minute)
 	defer f.PodClient().Delete(podName, &meta_v1.DeleteOptions{})
 	err = framework.WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
 	framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to be running", podName))
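In the two basic-test hunks above (Elasticsearch and GCL), the new node-name argument is passed as an empty string; an empty NodeName in a pod spec leaves placement to the default scheduler, so only the load tests below pin their generators to specific nodes.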
@@ -17,7 +17,7 @@ limitations under the License.
 package e2e
 
 import (
-	"strconv"
+	"fmt"
 	"time"
 
 	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -40,7 +40,8 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Slow] [Flaky]",
 	gclLogsProvider, err := newGclLogsProvider(f)
 	framework.ExpectNoError(err, "Failed to create GCL logs provider")
 
-	nodeCount := len(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items)
+	nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items
+	nodeCount := len(nodes)
 	podCount := 30 * nodeCount
 	loggingDuration := 10 * time.Minute
 	linesPerSecond := 1000 * nodeCount
@@ -50,8 +51,9 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Slow] [Flaky]",
 	By("Running logs generator pods")
 	pods := []*loggingPod{}
 	for podIdx := 0; podIdx < podCount; podIdx++ {
-		podName := f.Namespace.Name + "-logs-generator-" + strconv.Itoa(linesPerPod) + "-" + strconv.Itoa(podIdx)
-		pods = append(pods, createLoggingPod(f, podName, linesPerPod, loggingDuration))
+		node := nodes[podIdx%len(nodes)]
+		podName := fmt.Sprintf("logs-generator-%d-%d", linesPerPod, podIdx)
+		pods = append(pods, createLoggingPod(f, podName, node.Name, linesPerPod, loggingDuration))
 
 		defer f.PodClient().Delete(podName, &meta_v1.DeleteOptions{})
 	}
@@ -79,8 +81,8 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Slow] [Flaky]",
 	gclLogsProvider, err := newGclLogsProvider(f)
 	framework.ExpectNoError(err, "Failed to create GCL logs provider")
 
-	nodeCount := len(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items)
-	maxPodCount := 10 * nodeCount
+	nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items
+	maxPodCount := 10
 	jobDuration := 1 * time.Minute
 	linesPerPodPerSecond := 100
 	testDuration := 10 * time.Minute
@@ -92,13 +94,13 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Slow] [Flaky]",
 
 	By("Running short-living pods")
 	pods := []*loggingPod{}
-	for i := 0; i < podRunCount; i++ {
-		podName := f.Namespace.Name + "-job-logs-generator-" +
-			strconv.Itoa(maxPodCount) + "-" + strconv.Itoa(linesPerPod) + "-" + strconv.Itoa(i)
-		pods = append(pods, createLoggingPod(f, podName, linesPerPod, jobDuration))
-
-		defer f.PodClient().Delete(podName, &meta_v1.DeleteOptions{})
+	for runIdx := 0; runIdx < podRunCount; runIdx++ {
+		for _, node := range nodes {
+			podName := fmt.Sprintf("job-logs-generator-%d-%d-%d", maxPodCount, linesPerPod, runIdx)
+			pods = append(pods, createLoggingPod(f, podName, node.Name, linesPerPod, jobDuration))
+
+			defer f.PodClient().Delete(podName, &meta_v1.DeleteOptions{})
+		}
 		time.Sleep(podRunDelay)
 	}
 
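In the short-living-pods test, the rewritten loop starts one generator per node on every iteration of the outer run loop, pacing the runs with time.Sleep(podRunDelay), so the churn is spread evenly across the cluster rather than concentrated wherever the scheduler happens to place the pods.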
@@ -91,9 +91,9 @@ func (entry *logEntry) getLogEntryNumber() (int, bool) {
 	return lineNumber, err == nil
 }
 
-func createLoggingPod(f *framework.Framework, podName string, totalLines int, loggingDuration time.Duration) *loggingPod {
+func createLoggingPod(f *framework.Framework, podName string, nodeName string, totalLines int, loggingDuration time.Duration) *loggingPod {
 	framework.Logf("Starting pod %s", podName)
-	createLogsGeneratorPod(f, podName, totalLines, loggingDuration)
+	createLogsGeneratorPod(f, podName, nodeName, totalLines, loggingDuration)
 
 	return &loggingPod{
 		Name: podName,
@@ -104,7 +104,7 @@ func createLoggingPod(f *framework.Framework, podName string, totalLines int, lo
 	}
 }
 
-func createLogsGeneratorPod(f *framework.Framework, podName string, linesCount int, duration time.Duration) {
+func createLogsGeneratorPod(f *framework.Framework, podName string, nodeName string, linesCount int, duration time.Duration) {
 	f.PodClient().Create(&api_v1.Pod{
 		ObjectMeta: meta_v1.ObjectMeta{
 			Name: podName,
@@ -137,6 +137,7 @@ func createLogsGeneratorPod(f *framework.Framework, podName string, linesCount i
 					},
 				},
 			},
+			NodeName: nodeName,
 		},
 	})
 }
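For context on the last hunk: setting Spec.NodeName on a pod binds it directly to the named node and bypasses the scheduler, while an empty value leaves placement to the default scheduler. A minimal sketch with the present-day core/v1 types (package paths and names are assumptions; the test file itself imports an older api_v1 package):

package main

import (
	"fmt"

	core_v1 "k8s.io/api/core/v1"
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// logsGeneratorPod builds a pod object; nodeName == "" lets the scheduler
// choose a node, a non-empty nodeName pins the pod to that node.
func logsGeneratorPod(podName, nodeName string) *core_v1.Pod {
	return &core_v1.Pod{
		ObjectMeta: meta_v1.ObjectMeta{Name: podName},
		Spec: core_v1.PodSpec{
			RestartPolicy: core_v1.RestartPolicyNever,
			Containers: []core_v1.Container{
				{Name: podName, Image: "logs-generator"}, // image name illustrative
			},
			NodeName: nodeName,
		},
	}
}

func main() {
	pod := logsGeneratorPod("logs-generator-0", "node-a")
	fmt.Println(pod.Name, "pinned to", pod.Spec.NodeName)
}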