e2e framework: move log size verification into framework/debug

This helps with getting rid of the ssh dependency. The same init package
that registers namespace dumping takes care of adding the functionality
back to framework instances.
Patrick Ohly 2022-08-25 18:19:16 +02:00
parent 802451b6ca
commit f9bc4f837b
3 changed files with 39 additions and 33 deletions
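
With this change, a suite opts back into log size verification simply by side-effect importing the init package touched below. A minimal sketch of the suite wiring, assuming the import path matches the package shown in this diff:

package e2e

import (
	// Side-effect import: registers debug.DumpAllNamespaceInfo with the
	// framework and, when framework.TestContext.GatherLogsSizes is set,
	// hooks up the per-test log size verifier.
	_ "k8s.io/kubernetes/test/e2e/framework/debug/init"
)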

View File

@@ -14,10 +14,14 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Package init sets debug.DumpAllNamespaceInfo as implementation in the framework.
+// Package init sets debug.DumpAllNamespaceInfo as implementation in the framework
+// and enables log size verification.
 package init
 
 import (
+	"sync"
+
+	"github.com/onsi/ginkgo/v2"
+
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/framework/debug"
 )
@@ -28,6 +32,31 @@ func init() {
 			f.DumpAllNamespaceInfo = func(f *framework.Framework, ns string) {
 				debug.DumpAllNamespaceInfo(f.ClientSet, ns)
 			}
+
+			if framework.TestContext.GatherLogsSizes {
+				var (
+					wg           sync.WaitGroup
+					closeChannel chan bool
+					verifier     *debug.LogsSizeVerifier
+				)
+
+				ginkgo.BeforeEach(func() {
+					wg.Add(1)
+					closeChannel = make(chan bool)
+					verifier = debug.NewLogsVerifier(f.ClientSet, closeChannel)
+					go func() {
+						defer wg.Done()
+						verifier.Run()
+					}()
+					ginkgo.DeferCleanup(func() {
+						ginkgo.By("Gathering log sizes data", func() {
+							close(closeChannel)
+							wg.Wait()
+							f.TestSummaries = append(f.TestSummaries, verifier.GetSummary())
+						})
+					})
+				})
+			}
 		},
 	)
 }
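
The hook above keeps the verifier state in locals captured by the BeforeEach closure: a stop channel tells LogsSizeVerifier.Run to exit, and the WaitGroup guarantees the goroutine has finished before GetSummary is read. A self-contained sketch of just that shutdown pattern, with a hypothetical run function standing in for the verifier:

package main

import (
	"fmt"
	"sync"
	"time"
)

// run stands in for LogsSizeVerifier.Run: it probes periodically
// until the stop channel is closed, then returns.
func run(stop <-chan bool) {
	for {
		select {
		case <-stop:
			return
		case <-time.After(10 * time.Millisecond):
			// probe log sizes here
		}
	}
}

func main() {
	var wg sync.WaitGroup
	stop := make(chan bool)

	wg.Add(1)
	go func() {
		defer wg.Done()
		run(stop)
	}()

	// The DeferCleanup callback does the equivalent of this: signal
	// the worker, wait for it, and only then read its results.
	close(stop)
	wg.Wait()
	fmt.Println("verifier stopped; summary is safe to read")
}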

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package framework
+package debug
 
 import (
 	"bytes"
@@ -27,7 +27,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 
-	// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
+	"k8s.io/kubernetes/test/e2e/framework"
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 )
@@ -109,7 +109,7 @@ func (s *LogsSizeDataSummary) PrintHumanReadable() string {
 
 // PrintJSON returns the summary of log size data with JSON format.
 func (s *LogsSizeDataSummary) PrintJSON() string {
-	return PrettyPrintJSON(*s)
+	return framework.PrettyPrintJSON(*s)
 }
 
 // SummaryKind returns the summary of log size data summary.
@@ -158,8 +158,8 @@ func (d *LogsSizeData) addNewData(ip, path string, timestamp time.Time, size int
 // NewLogsVerifier creates a new LogsSizeVerifier which will stop when stopChannel is closed
 func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVerifier {
 	nodeAddresses, err := e2essh.NodeSSHHosts(c)
-	ExpectNoError(err)
-	instanceAddress := APIAddress() + ":22"
+	framework.ExpectNoError(err)
+	instanceAddress := framework.APIAddress() + ":22"
 
 	workChannel := make(chan WorkItem, len(nodeAddresses)+1)
 	workers := make([]*LogSizeGatherer, workersNo)
@@ -256,13 +256,13 @@ func (g *LogSizeGatherer) Work() bool {
 	sshResult, err := e2essh.SSH(
 		fmt.Sprintf("ls -l %v | awk '{print $9, $5}' | tr '\n' ' '", strings.Join(workItem.paths, " ")),
 		workItem.ip,
-		TestContext.Provider,
+		framework.TestContext.Provider,
 	)
 	if err != nil {
-		Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err)
+		framework.Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err)
 		// In case of repeated error give up.
 		if workItem.backoffMultiplier >= 128 {
-			Logf("Failed to ssh to a node %v multiple times in a row. Giving up.", workItem.ip)
+			framework.Logf("Failed to ssh to a node %v multiple times in a row. Giving up.", workItem.ip)
 			g.wg.Done()
 			return false
 		}
@@ -278,7 +278,7 @@ func (g *LogSizeGatherer) Work() bool {
 		path := results[i]
 		size, err := strconv.Atoi(results[i+1])
 		if err != nil {
-			Logf("Error during conversion to int: %v, skipping data. Error: %v", results[i+1], err)
+			framework.Logf("Error during conversion to int: %v, skipping data. Error: %v", results[i+1], err)
 			continue
 		}
 		g.data.addNewData(workItem.ip, path, now, size)
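
For context on the last two hunks: the remote ls/awk/tr pipeline prints alternating "path size" tokens on a single line, and Work() walks the token list pairwise, skipping entries whose size fails to parse. A small sketch of that parsing step (parseSizes is a hypothetical helper, not framework API):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSizes mimics how Work() consumes the SSH output: tokens
// alternate between file path and size in bytes.
func parseSizes(stdout string) map[string]int {
	results := strings.Fields(stdout)
	sizes := map[string]int{}
	for i := 0; i+1 < len(results); i += 2 {
		size, err := strconv.Atoi(results[i+1])
		if err != nil {
			// Matches the gatherer's behavior: log-and-skip bad data.
			continue
		}
		sizes[results[i]] = size
	}
	return sizes
}

func main() {
	out := "/var/log/kubelet.log 123456 /var/log/kube-proxy.log 7890 "
	fmt.Println(parseSizes(out))
}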

View File

@@ -28,7 +28,6 @@ import (
 	"os"
 	"path"
 	"strings"
-	"sync"
 	"time"
 
 	"k8s.io/apimachinery/pkg/runtime"
@@ -106,10 +105,6 @@ type Framework struct {
 	// as expectations vary greatly. Constraints are grouped by the container names.
 	AddonResourceConstraints map[string]ResourceConstraint
 
-	logsSizeWaitGroup    sync.WaitGroup
-	logsSizeCloseChannel chan bool
-	logsSizeVerifier     *LogsSizeVerifier
-
 	// Flaky operation failures in an e2e test can be captured through this.
 	flakeReport *FlakeReport
@@ -287,17 +282,6 @@ func (f *Framework) BeforeEach() {
 		}
 	}
 
-	if TestContext.GatherLogsSizes {
-		f.logsSizeWaitGroup = sync.WaitGroup{}
-		f.logsSizeWaitGroup.Add(1)
-		f.logsSizeCloseChannel = make(chan bool)
-		f.logsSizeVerifier = NewLogsVerifier(f.ClientSet, f.logsSizeCloseChannel)
-		go func() {
-			f.logsSizeVerifier.Run()
-			f.logsSizeWaitGroup.Done()
-		}()
-	}
-
 	f.flakeReport = NewFlakeReport()
 }
@@ -416,13 +400,6 @@ func (f *Framework) AfterEach() {
 			f.TestSummaries = append(f.TestSummaries, summary)
 		}
 
-		if TestContext.GatherLogsSizes {
-			ginkgo.By("Gathering log sizes data")
-			close(f.logsSizeCloseChannel)
-			f.logsSizeWaitGroup.Wait()
-			f.TestSummaries = append(f.TestSummaries, f.logsSizeVerifier.GetSummary())
-		}
-
 		TestContext.CloudConfig.Provider.FrameworkAfterEach(f)
 
 		// Report any flakes that were observed in the e2e test and reset.
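
The deleted BeforeEach/AfterEach bookkeeping has no replacement inside framework.go because Ginkgo v2's DeferCleanup runs registered callbacks automatically after each spec. A minimal sketch of that ordering in an ordinary suite (hypothetical test, not part of the commit):

package e2e_test

import (
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func TestCleanupPattern(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "cleanup pattern")
}

var _ = ginkgo.Describe("DeferCleanup", func() {
	var log []string

	ginkgo.BeforeEach(func() {
		log = append(log, "setup")
		// Registered during setup, but runs after the spec body,
		// which is what makes an explicit AfterEach unnecessary.
		ginkgo.DeferCleanup(func() {
			log = append(log, "cleanup")
		})
	})

	ginkgo.It("runs setup, then body, then cleanup", func() {
		log = append(log, "body")
	})
})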