Merge pull request #42507 from crassirostris/cluster-logging-tests-uniform-load-distribution

Automatic merge from submit-queue (batch tested with PRs 43149, 41399, 43154, 43569, 42507)

Distribute load in cluster load tests uniformly

This PR makes the cluster logging load tests distribute logging pods uniformly across nodes, to avoid the situation where 80% of the pods end up on a single node and the overall results are worse than they could be.
Authored by Kubernetes Submit Queue on 2017-03-26 00:55:25 -07:00, committed by GitHub
Commit 8d7ba2bea2
4 changed files with 20 additions and 17 deletions
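
The change boils down to a round-robin assignment: each logging pod is pinned to a node by index instead of being left to the scheduler. A minimal standalone sketch of that assignment logic (the name assignRoundRobin and the node/pod names are illustrative only, not part of the test code):

package main

import "fmt"

// assignRoundRobin pins each of podCount pods to a node chosen round-robin,
// mirroring the nodes[podIdx%len(nodes)] pattern introduced in this PR.
// It returns a map from pod name to the node the pod is pinned to.
func assignRoundRobin(nodes []string, podCount int) map[string]string {
	assignment := make(map[string]string, podCount)
	for podIdx := 0; podIdx < podCount; podIdx++ {
		podName := fmt.Sprintf("logs-generator-%d", podIdx)
		assignment[podName] = nodes[podIdx%len(nodes)]
	}
	return assignment
}

func main() {
	// With 6 pods over 3 nodes, every node receives exactly 2 pods.
	for pod, node := range assignRoundRobin([]string{"node-a", "node-b", "node-c"}, 6) {
		fmt.Printf("%s -> %s\n", pod, node)
	}
}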

@@ -48,7 +48,7 @@ var _ = framework.KubeDescribe("Cluster level logging using Elasticsearch [Featu
 	framework.ExpectNoError(err, "Fluentd deployed incorrectly")
 	By("Running synthetic logger")
-	pod := createLoggingPod(f, podName, 10*60, 10*time.Minute)
+	pod := createLoggingPod(f, podName, "", 10*60, 10*time.Minute)
 	defer f.PodClient().Delete(podName, &meta_v1.DeleteOptions{})
 	err = framework.WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
 	framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to be running", podName))

@@ -46,7 +46,7 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL", func() {
 	framework.ExpectNoError(err, "Fluentd deployed incorrectly")
 	By("Running synthetic logger")
-	pod := createLoggingPod(f, podName, 10*60, 10*time.Minute)
+	pod := createLoggingPod(f, podName, "", 10*60, 10*time.Minute)
 	defer f.PodClient().Delete(podName, &meta_v1.DeleteOptions{})
 	err = framework.WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
 	framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to be running", podName))

@@ -17,7 +17,7 @@ limitations under the License.
 package e2e
 import (
-	"strconv"
+	"fmt"
 	"time"
 	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -40,7 +40,8 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Slow] [Flaky]",
 	gclLogsProvider, err := newGclLogsProvider(f)
 	framework.ExpectNoError(err, "Failed to create GCL logs provider")
-	nodeCount := len(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items)
+	nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items
+	nodeCount := len(nodes)
 	podCount := 30 * nodeCount
 	loggingDuration := 10 * time.Minute
 	linesPerSecond := 1000 * nodeCount
@@ -50,8 +51,9 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Slow] [Flaky]",
 	By("Running logs generator pods")
 	pods := []*loggingPod{}
 	for podIdx := 0; podIdx < podCount; podIdx++ {
-		podName := f.Namespace.Name + "-logs-generator-" + strconv.Itoa(linesPerPod) + "-" + strconv.Itoa(podIdx)
-		pods = append(pods, createLoggingPod(f, podName, linesPerPod, loggingDuration))
+		node := nodes[podIdx%len(nodes)]
+		podName := fmt.Sprintf("logs-generator-%d-%d", linesPerPod, podIdx)
+		pods = append(pods, createLoggingPod(f, podName, node.Name, linesPerPod, loggingDuration))
 		defer f.PodClient().Delete(podName, &meta_v1.DeleteOptions{})
 	}
@@ -79,8 +81,8 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Slow] [Flaky]",
 	gclLogsProvider, err := newGclLogsProvider(f)
 	framework.ExpectNoError(err, "Failed to create GCL logs provider")
-	nodeCount := len(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items)
-	maxPodCount := 10 * nodeCount
+	nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items
+	maxPodCount := 10
 	jobDuration := 1 * time.Minute
 	linesPerPodPerSecond := 100
 	testDuration := 10 * time.Minute
@@ -92,13 +94,13 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Slow] [Flaky]",
 	By("Running short-living pods")
 	pods := []*loggingPod{}
-	for i := 0; i < podRunCount; i++ {
-		podName := f.Namespace.Name + "-job-logs-generator-" +
-			strconv.Itoa(maxPodCount) + "-" + strconv.Itoa(linesPerPod) + "-" + strconv.Itoa(i)
-		pods = append(pods, createLoggingPod(f, podName, linesPerPod, jobDuration))
+	for runIdx := 0; runIdx < podRunCount; runIdx++ {
+		for _, node := range nodes {
+			podName := fmt.Sprintf("job-logs-generator-%d-%d-%d", maxPodCount, linesPerPod, runIdx)
+			pods = append(pods, createLoggingPod(f, podName, node.Name, linesPerPod, jobDuration))
 			defer f.PodClient().Delete(podName, &meta_v1.DeleteOptions{})
+		}
 		time.Sleep(podRunDelay)
 	}

@@ -91,9 +91,9 @@ func (entry *logEntry) getLogEntryNumber() (int, bool) {
 	return lineNumber, err == nil
 }
-func createLoggingPod(f *framework.Framework, podName string, totalLines int, loggingDuration time.Duration) *loggingPod {
+func createLoggingPod(f *framework.Framework, podName string, nodeName string, totalLines int, loggingDuration time.Duration) *loggingPod {
 	framework.Logf("Starting pod %s", podName)
-	createLogsGeneratorPod(f, podName, totalLines, loggingDuration)
+	createLogsGeneratorPod(f, podName, nodeName, totalLines, loggingDuration)
 	return &loggingPod{
 		Name: podName,
@@ -104,7 +104,7 @@ func createLoggingPod(f *framework.Framework, podName string, totalLines int, lo
 	}
 }
-func createLogsGeneratorPod(f *framework.Framework, podName string, linesCount int, duration time.Duration) {
+func createLogsGeneratorPod(f *framework.Framework, podName string, nodeName string, linesCount int, duration time.Duration) {
 	f.PodClient().Create(&api_v1.Pod{
 		ObjectMeta: meta_v1.ObjectMeta{
 			Name: podName,
@@ -137,6 +137,7 @@ func createLogsGeneratorPod(f *framework.Framework, podName string, linesCount i
 					},
 				},
 			},
+			NodeName: nodeName,
 		},
 	})
 }
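
For context on the last hunk: setting Spec.NodeName on a pod makes the kubelet on that node run it directly, bypassing the scheduler, which is what makes the per-node placement above deterministic. A minimal sketch of such a pod object, using the same api_v1/meta_v1 aliases as the test (the import paths shown are the current k8s.io/api ones and the container details are placeholders, not the logs-generator image the test actually uses):

package main

import (
	"fmt"

	api_v1 "k8s.io/api/core/v1"
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// logsGeneratorPod builds a pod pinned to nodeName via Spec.NodeName,
// so the scheduler never gets a say in where it runs.
func logsGeneratorPod(podName, nodeName string) *api_v1.Pod {
	return &api_v1.Pod{
		ObjectMeta: meta_v1.ObjectMeta{
			Name: podName,
		},
		Spec: api_v1.PodSpec{
			RestartPolicy: api_v1.RestartPolicyNever,
			NodeName:      nodeName,
			Containers: []api_v1.Container{
				{
					Name:    "logs-generator",
					Image:   "busybox",
					Command: []string{"sh", "-c", "for i in $(seq 1 60); do echo line $i; sleep 1; done"},
				},
			},
		},
	}
}

func main() {
	pod := logsGeneratorPod("logs-generator-0", "node-a")
	fmt.Println(pod.Spec.NodeName) // prints: node-a
}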