Implement JSON printing for test summaries
commit f5a41823c2 (parent ab6edd8170)
@@ -152,7 +152,7 @@ func (f *Framework) afterEach() {
 
 	summaries := make([]TestDataSummary, 0)
 	if testContext.GatherKubeSystemResourceUsageData {
-		summaries = append(summaries, f.gatherer.stopAndSummarize([]int{50, 90, 99, 100}, f.addonResourceConstraints))
+		summaries = append(summaries, f.gatherer.stopAndSummarize([]int{90, 99}, f.addonResourceConstraints))
 	}
 
 	if testContext.GatherLogsSizes {
@@ -75,8 +75,15 @@ type LogsSizeVerifier struct {
 	workers []*LogSizeGatherer
 }
 
+type SingleLogSummary struct {
+	AverageGenerationRate int
+	NumberOfProbes        int
+}
+
+type LogSizeDataTimeseries map[string]map[string][]TimestampedSize
+
 // node -> file -> data
-type LogsSizeDataSummary map[string]map[string][]TimestampedSize
+type LogsSizeDataSummary map[string]map[string]SingleLogSummary
 
 // TODO: make sure that we don't need locking here
 func (s *LogsSizeDataSummary) PrintHumanReadable() string {
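
With this change each log file maps to a SingleLogSummary instead of the raw probe series, so the summary type marshals cleanly to JSON. A minimal sketch of the resulting shape (the node address and log path are made up for illustration):

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the types introduced in this commit.
type SingleLogSummary struct {
	AverageGenerationRate int
	NumberOfProbes        int
}

type LogsSizeDataSummary map[string]map[string]SingleLogSummary

func main() {
	// Hypothetical node address and log path, for illustration only.
	s := LogsSizeDataSummary{
		"10.0.0.1": {
			"/var/log/kubelet.log": {AverageGenerationRate: 2048, NumberOfProbes: 12},
		},
	}
	out, _ := json.MarshalIndent(s, "", "  ")
	fmt.Println(string(out)) // nested JSON objects keyed by node, then by file
}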
@@ -86,12 +93,7 @@ func (s *LogsSizeDataSummary) PrintHumanReadable() string {
 	for k, v := range *s {
 		fmt.Fprintf(w, "%v\t\t\t\n", k)
 		for path, data := range v {
-			if len(data) > 1 {
-				last := data[len(data)-1]
-				first := data[0]
-				rate := (last.size - first.size) / int(last.timestamp.Sub(first.timestamp)/time.Second)
-				fmt.Fprintf(w, "\t%v\t%v\t%v\n", path, rate, len(data))
-			}
+			fmt.Fprintf(w, "\t%v\t%v\t%v\n", path, data.AverageGenerationRate, data.NumberOfProbes)
 		}
 	}
 	w.Flush()
@@ -99,11 +101,11 @@ func (s *LogsSizeDataSummary) PrintHumanReadable() string {
 }
 
 func (s *LogsSizeDataSummary) PrintJSON() string {
-	return "JSON printer not implemented for LogsSizeDataSummary"
+	return prettyPrintJSON(*s)
 }
 
 type LogsSizeData struct {
-	data LogsSizeDataSummary
+	data LogSizeDataTimeseries
 	lock sync.Mutex
 }
 
@@ -116,7 +118,7 @@ type WorkItem struct {
 }
 
 func prepareData(masterAddress string, nodeAddresses []string) LogsSizeData {
-	data := make(LogsSizeDataSummary)
+	data := make(LogSizeDataTimeseries)
 	ips := append(nodeAddresses, masterAddress)
 	for _, ip := range ips {
 		data[ip] = make(map[string][]TimestampedSize)
@@ -170,9 +172,24 @@ func NewLogsVerifier(c *client.Client, stopChannel chan bool) *LogsSizeVerifier
 	return verifier
 }
 
-// PrintData returns a string with formated results
-func (v *LogsSizeVerifier) GetSummary() *LogsSizeDataSummary {
-	return &v.data.data
+// GetSummary returns a summary (average generation rate and number of probes) of the data gathered by LogSizeVerifier
+func (s *LogsSizeVerifier) GetSummary() *LogsSizeDataSummary {
+	result := make(LogsSizeDataSummary)
+	for k, v := range s.data.data {
+		result[k] = make(map[string]SingleLogSummary)
+		for path, data := range v {
+			if len(data) > 1 {
+				last := data[len(data)-1]
+				first := data[0]
+				rate := (last.size - first.size) / int(last.timestamp.Sub(first.timestamp)/time.Second)
+				result[k][path] = SingleLogSummary{
+					AverageGenerationRate: rate,
+					NumberOfProbes:        len(data),
+				}
+			}
+		}
+	}
+	return &result
 }
 
 // Run starts log size gathering. It starts a gorouting for every worker and then blocks until stopChannel is closed
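
The new GetSummary body computes the average generation rate as the byte delta between the first and last probe, divided (integer division) by the elapsed seconds. A self-contained sketch of the same arithmetic, using hypothetical probe values and field names:

package main

import (
	"fmt"
	"time"
)

// timestampedSize mirrors the shape of the gatherer's probes (names are assumptions).
type timestampedSize struct {
	timestamp time.Time
	size      int
}

func main() {
	start := time.Now()
	probes := []timestampedSize{
		{timestamp: start, size: 1000},
		{timestamp: start.Add(10 * time.Second), size: 21000},
	}
	first, last := probes[0], probes[len(probes)-1]
	// (21000-1000) bytes over 10 seconds -> 2000 bytes/second (integer division).
	rate := (last.size - first.size) / int(last.timestamp.Sub(first.timestamp)/time.Second)
	fmt.Printf("average generation rate: %d bytes/second over %d probes\n", rate, len(probes))
}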
@@ -70,7 +70,7 @@ func (m *metricsForE2E) PrintHumanReadable() string {
 }
 
 func (m *metricsForE2E) PrintJSON() string {
-	return "JSON printer not implemented for Metrics"
+	return prettyPrintJSON(*m)
 }
 
 var InterestingApiServerMetrics = []string{
@@ -402,10 +402,12 @@ func VerifySchedulerLatency(c *client.Client) error {
 func prettyPrintJSON(metrics interface{}) string {
 	output := &bytes.Buffer{}
 	if err := json.NewEncoder(output).Encode(metrics); err != nil {
+		Logf("Error building encoder: %v", err)
 		return ""
 	}
 	formatted := &bytes.Buffer{}
 	if err := json.Indent(formatted, output.Bytes(), "", " "); err != nil {
+		Logf("Error indenting: %v", err)
 		return ""
 	}
 	return string(formatted.Bytes())
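
prettyPrintJSON produces compact JSON with an Encoder first, then re-indents it with json.Indent as a second pass. A runnable sketch of that two-step pattern (the payload in main is invented for the example):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func prettyPrintJSON(v interface{}) string {
	output := &bytes.Buffer{}
	// First pass: compact encoding; fails if v contains unsupported types.
	if err := json.NewEncoder(output).Encode(v); err != nil {
		return ""
	}
	formatted := &bytes.Buffer{}
	// Second pass: re-indent the compact bytes for readability.
	if err := json.Indent(formatted, output.Bytes(), "", "  "); err != nil {
		return ""
	}
	return formatted.String()
}

func main() {
	// Hypothetical payload, for illustration only.
	fmt.Println(prettyPrintJSON(map[string][]int{"99": {1, 2, 3}}))
}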
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"math"
 	"sort"
+	"strconv"
 	"strings"
 	"sync"
 	"text/tabwriter"
@@ -48,12 +49,13 @@ type containerResourceGatherer struct {
 }
 
 type singleContainerSummary struct {
-	name string
-	cpu  float64
-	mem  int64
+	Name string
+	Cpu  float64
+	Mem  int64
 }
 
-type ResourceUsageSummary map[int][]singleContainerSummary
+// we can't have int here, as JSON does not accept integer keys.
+type ResourceUsageSummary map[string][]singleContainerSummary
 
 func (s *ResourceUsageSummary) PrintHumanReadable() string {
 	buf := &bytes.Buffer{}
|
|||||||
buf.WriteString(fmt.Sprintf("%v percentile:\n", perc))
|
buf.WriteString(fmt.Sprintf("%v percentile:\n", perc))
|
||||||
fmt.Fprintf(w, "container\tcpu(cores)\tmemory(MB)\n")
|
fmt.Fprintf(w, "container\tcpu(cores)\tmemory(MB)\n")
|
||||||
for _, summary := range summaries {
|
for _, summary := range summaries {
|
||||||
fmt.Fprintf(w, "%q\t%.3f\t%.2f\n", summary.name, summary.cpu, float64(summary.mem)/(1024*1024))
|
fmt.Fprintf(w, "%q\t%.3f\t%.2f\n", summary.Name, summary.Cpu, float64(summary.Mem)/(1024*1024))
|
||||||
}
|
}
|
||||||
w.Flush()
|
w.Flush()
|
||||||
}
|
}
|
||||||
@ -70,7 +72,7 @@ func (s *ResourceUsageSummary) PrintHumanReadable() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *ResourceUsageSummary) PrintJSON() string {
|
func (s *ResourceUsageSummary) PrintJSON() string {
|
||||||
return "JSON printer not implemented for ResourceUsageSummary"
|
return prettyPrintJSON(*s)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *containerResourceGatherer) startGatheringData(c *client.Client, period time.Duration) {
|
func (g *containerResourceGatherer) startGatheringData(c *client.Client, period time.Duration) {
|
||||||
@ -115,10 +117,10 @@ func (g *containerResourceGatherer) stopAndSummarize(percentiles []int, constrai
|
|||||||
for _, perc := range percentiles {
|
for _, perc := range percentiles {
|
||||||
for _, name := range sortedKeys {
|
for _, name := range sortedKeys {
|
||||||
usage := stats[perc][name]
|
usage := stats[perc][name]
|
||||||
summary[perc] = append(summary[perc], singleContainerSummary{
|
summary[strconv.Itoa(perc)] = append(summary[strconv.Itoa(perc)], singleContainerSummary{
|
||||||
name: name,
|
Name: name,
|
||||||
cpu: usage.CPUUsageInCores,
|
Cpu: usage.CPUUsageInCores,
|
||||||
mem: usage.MemoryWorkingSetInBytes,
|
Mem: usage.MemoryWorkingSetInBytes,
|
||||||
})
|
})
|
||||||
// Verifying 99th percentile of resource usage
|
// Verifying 99th percentile of resource usage
|
||||||
if perc == 99 {
|
if perc == 99 {
|
||||||