Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-02 00:07:50 +00:00)
Loosen requirements of cluster logging e2e tests, make them more stable
commit c4660222e6 (parent 223c721d6e)
@@ -45,10 +45,10 @@ var _ = framework.KubeDescribe("Cluster level logging using Elasticsearch [Featu
 		framework.ExpectNoError(err, "Elasticsearch is not working")
 
 		By("Running synthetic logger")
-		pod := createLoggingPod(f, podName, 100, 1*time.Second)
+		pod := createLoggingPod(f, podName, 10*60, 10*time.Minute)
 		defer f.PodClient().Delete(podName, &meta_v1.DeleteOptions{})
-		err = framework.WaitForPodSuccessInNamespace(f.ClientSet, podName, f.Namespace.Name)
-		framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to succeed", podName))
+		err = framework.WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
+		framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to be running", podName))
 
 		By("Waiting for logs to ingest")
 		config := &loggingTestConfig{
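The two edits above are the core of the stabilization: the synthetic logger now emits 10*60 = 600 numbered lines spread over 10 minutes (still one line per second, just for ten times longer), and the test waits only for the pod to be Running rather than for it to Succeed. createLoggingPod's body is not part of this diff; below is a minimal sketch of the generator it implies, assuming the third argument is the total line count and the fourth the overall duration (runSyntheticLogger is an illustrative name, not from the source):

	package main

	import (
		"fmt"
		"time"
	)

	// runSyntheticLogger is a hypothetical stand-in for the pod started by
	// createLoggingPod: it emits totalLines numbered lines evenly spread
	// across duration. With the new arguments (10*60 lines, 10 minutes)
	// that is still one line per second, only for far longer.
	func runSyntheticLogger(totalLines int, duration time.Duration) {
		interval := duration / time.Duration(totalLines)
		for i := 0; i < totalLines; i++ {
			// The sequence number is what getLogEntryNumber() later
			// recovers on the ingestion side.
			fmt.Println(i)
			time.Sleep(interval)
		}
	}

	func main() {
		// Scaled-down demo; the test itself uses (10*60, 10*time.Minute).
		runSyntheticLogger(5, 5*time.Second)
	}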
@@ -58,7 +58,6 @@ var _ = framework.KubeDescribe("Cluster level logging using Elasticsearch [Featu
 			MaxAllowedLostFraction:    0,
 			MaxAllowedFluentdRestarts: 0,
 		}
-		err = waitForLogsIngestion(f, config)
-		framework.ExpectNoError(err, "Failed to ingest logs")
+		framework.ExpectNoError(waitForSomeLogs(f, config), "Failed to ingest logs")
 	})
 })
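The basic test then swaps the strict waitForLogsIngestion call for waitForSomeLogs, which only requires that each pod shows up in the backend at all. The loggingTestConfig struct is defined outside this diff; the sketch below reconstructs just the fields these hunks touch, with types inferred from usage (logsProvider, loggingPod, and logEntry are assumptions, not the source definitions):

	package logging

	import "time"

	// Sketch of the parts of loggingTestConfig exercised by these hunks;
	// the real struct lives elsewhere in the test suite and may differ.
	type loggingTestConfig struct {
		LogsProvider              logsProvider  // Elasticsearch- or GCL-backed reader
		Pods                      []*loggingPod // synthetic logger pods to check
		IngestionTimeout          time.Duration // polling budget for waitForSomeLogs
		MaxAllowedLostFraction    float64       // 0 here; a load-test constant in the [Slow] suite
		MaxAllowedFluentdRestarts int           // 0 here
	}

	// logsProvider is inferred from config.LogsProvider.ReadEntries(pod).
	type logsProvider interface {
		ReadEntries(pod *loggingPod) []logEntry
	}

	// loggingPod is inferred from pod.Name and pod.ExpectedLinesNumber.
	type loggingPod struct {
		Name                string
		ExpectedLinesNumber int
	}

	// logEntry is inferred from entry.getLogEntryNumber().
	type logEntry struct {
		entryNumber int
		valid       bool
	}

	func (e logEntry) getLogEntryNumber() (int, bool) { return e.entryNumber, e.valid }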
@@ -26,8 +26,7 @@ import (
 	. "github.com/onsi/ginkgo"
 )
 
-// TODO(crassirostris): Remove Flaky once test is stable
-var _ = framework.KubeDescribe("Cluster level logging using GCL [Flaky]", func() {
+var _ = framework.KubeDescribe("Cluster level logging using GCL", func() {
 	f := framework.NewDefaultFramework("gcl-logging")
 
 	BeforeEach(func() {
@@ -44,10 +43,10 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Flaky]", func()
 		framework.ExpectNoError(err, "GCL is not working")
 
 		By("Running synthetic logger")
-		pod := createLoggingPod(f, podName, 100, 1*time.Second)
+		pod := createLoggingPod(f, podName, 10*60, 10*time.Minute)
 		defer f.PodClient().Delete(podName, &meta_v1.DeleteOptions{})
-		err = framework.WaitForPodSuccessInNamespace(f.ClientSet, podName, f.Namespace.Name)
-		framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to succeed", podName))
+		err = framework.WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
+		framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to be running", podName))
 
 		By("Waiting for logs to ingest")
 		config := &loggingTestConfig{
@@ -57,7 +56,6 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Flaky]", func()
 			MaxAllowedLostFraction:    0,
 			MaxAllowedFluentdRestarts: 0,
 		}
-		err = waitForLogsIngestion(f, config)
-		framework.ExpectNoError(err, "Failed to ingest logs")
+		framework.ExpectNoError(waitForSomeLogs(f, config), "Failed to ingest logs")
 	})
 })
@@ -67,7 +67,7 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Slow] [Flaky]",
 			MaxAllowedLostFraction:    loadTestMaxAllowedLostFraction,
 			MaxAllowedFluentdRestarts: loadTestMaxAllowedFluentdRestarts,
 		}
-		err = waitForLogsIngestion(f, config)
+		err = waitForFullLogsIngestion(f, config)
 		if err != nil {
 			framework.Failf("Failed to ingest logs: %v", err)
 		} else {
@@ -113,7 +113,7 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Slow] [Flaky]",
 			MaxAllowedLostFraction:    loadTestMaxAllowedLostFraction,
 			MaxAllowedFluentdRestarts: loadTestMaxAllowedFluentdRestarts,
 		}
-		err = waitForLogsIngestion(f, config)
+		err = waitForFullLogsIngestion(f, config)
 		if err != nil {
 			framework.Failf("Failed to ingest logs: %v", err)
 		} else {
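Both [Slow] load-test hunks make the same one-line rename: the load tests keep the strict end-to-end check under its new name waitForFullLogsIngestion, while the basic tests above settle for waitForSomeLogs. Only the first lines of waitForFullLogsIngestion survive at the bottom of this view; below is a hypothetical sketch of the kind of lost-fraction check such a function would enforce against config.MaxAllowedLostFraction (checkLostFraction and both counters are illustrative names, not from the source):

	package main

	import "fmt"

	// checkLostFraction is a hypothetical stand-in for the strict check in
	// waitForFullLogsIngestion: fail if the fraction of expected lines that
	// never reached the backend exceeds the allowed budget.
	func checkLostFraction(expected, ingested int, maxAllowedLostFraction float64) error {
		lostFraction := 1 - float64(ingested)/float64(expected)
		if lostFraction > maxAllowedLostFraction {
			return fmt.Errorf("lost %.1f%% of log lines, at most %.1f%% allowed",
				lostFraction*100, maxAllowedLostFraction*100)
		}
		return nil
	}

	func main() {
		fmt.Println(checkLostFraction(1000, 990, 0.01)) // <nil>
		fmt.Println(checkLostFraction(1000, 900, 0.01)) // error: 10.0% lost
	}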
@@ -141,7 +141,41 @@ func createLogsGeneratorPod(f *framework.Framework, podName string, linesCount i
 	})
 }
 
-func waitForLogsIngestion(f *framework.Framework, config *loggingTestConfig) error {
+func waitForSomeLogs(f *framework.Framework, config *loggingTestConfig) error {
+	podHasIngestedLogs := make([]bool, len(config.Pods))
+	podWithIngestedLogsCount := 0
+
+	for start := time.Now(); podWithIngestedLogsCount < len(config.Pods) && time.Since(start) < config.IngestionTimeout; time.Sleep(ingestionRetryDelay) {
+		for podIdx, pod := range config.Pods {
+			if podHasIngestedLogs[podIdx] {
+				continue
+			}
+
+			entries := config.LogsProvider.ReadEntries(pod)
+			if len(entries) == 0 {
+				framework.Logf("No log entries from pod %s", pod.Name)
+				continue
+			}
+
+			for _, entry := range entries {
+				if _, ok := entry.getLogEntryNumber(); ok {
+					framework.Logf("Found some log entries from pod %s", pod.Name)
+					podHasIngestedLogs[podIdx] = true
+					podWithIngestedLogsCount++
+					break
+				}
+			}
+		}
+	}
+
+	if podWithIngestedLogsCount < len(config.Pods) {
+		return fmt.Errorf("some logs were ingested for %d pods out of %d", podWithIngestedLogsCount, len(config.Pods))
+	}
+
+	return nil
+}
+
+func waitForFullLogsIngestion(f *framework.Framework, config *loggingTestConfig) error {
 	expectedLinesNumber := 0
 	for _, pod := range config.Pods {
 		expectedLinesNumber += pod.ExpectedLinesNumber
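waitForSomeLogs polls every ingestionRetryDelay until every pod has produced at least one entry with a parseable sequence number, or config.IngestionTimeout runs out. The same poll-until-deadline shape, extracted into a self-contained sketch with the framework stubbed out (pollUntil is an illustrative name):

	package main

	import (
		"fmt"
		"time"
	)

	// pollUntil re-runs check every retryDelay until it returns true or the
	// timeout elapses, mirroring the for/time.Sleep loop in waitForSomeLogs.
	func pollUntil(timeout, retryDelay time.Duration, check func() bool) error {
		for start := time.Now(); time.Since(start) < timeout; time.Sleep(retryDelay) {
			if check() {
				return nil
			}
		}
		return fmt.Errorf("condition not met within %v", timeout)
	}

	func main() {
		ready := time.Now().Add(1 * time.Second)
		// Stand-in condition for "pod has ingested at least one log entry".
		err := pollUntil(3*time.Second, 200*time.Millisecond, func() bool {
			return time.Now().After(ready)
		})
		fmt.Println(err) // <nil>: the condition became true before the timeout
	}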