mirror of https://github.com/k3s-io/kubernetes.git
e2e lifecycle: use millisecond resolution for logs
Signed-off-by: Matthias Bertschy <matthias.bertschy@gmail.com>
commit 851d149a88
parent dd68c5f240
@@ -22,7 +22,6 @@ import (
 	"context"
 	"fmt"
 	"sort"
-	"strconv"
 	"strings"
 	"time"
 
@@ -62,9 +61,9 @@ func ExecCommand(name string, c execCommand) []string {
 	// all outputs are in the format of:
 	// time-since-boot timestamp container-name message
 
-	// The busybox time command doesn't support sub-second display. uptime displays in hundredths of a second, so we
-	// include both and use time since boot for relative ordering of file entries
-	timeCmd := "`date +%s` `cat /proc/uptime | awk '{print $1}'`"
+	// The busybox time command doesn't support sub-second display.
+	// We have to use nmeter to get the milliseconds part.
+	timeCmd := "`date -u +%FT$(nmeter -d0 '%3t' | head -n1)Z`"
 	containerName := name
 	if c.ContainerName != "" {
 		containerName = c.ContainerName
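Note: the sketch below is illustrative and not part of the commit. It assumes busybox nmeter's '%3t' prints the wall-clock time with millisecond precision (e.g. 12:34:56.789), so the new timeCmd yields an RFC 3339-style UTC timestamp; the sample value is made up. Go's time.Parse keeps the fractional part even though the time.RFC3339 layout does not spell it out.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical timestamp produced by
	// `date -u +%FT$(nmeter -d0 '%3t' | head -n1)Z`
	sample := "2024-05-01T12:34:56.789Z"

	// time.RFC3339 has no fractional-seconds element in its layout, but Go's
	// time.Parse accepts an optional fraction after the seconds, so the
	// millisecond part is preserved.
	ts, err := time.Parse(time.RFC3339, sample)
	if err != nil {
		panic(err)
	}
	fmt.Println(ts.UnixMilli()) // 1714566896789
}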
@@ -106,9 +105,7 @@ func sleepCommand(seconds int) string {
 
 type containerOutput struct {
 	// time the message was seen to the nearest second
-	timestamp time.Time
-	// time the message was seen since the host booted, to the nearest hundredth of a second
-	timeSinceBoot float64
+	timestamp     time.Time
 	containerName string
 	command       string
 }
@@ -117,7 +114,7 @@ type containerOutputList []containerOutput
 func (o containerOutputList) String() string {
 	var b bytes.Buffer
 	for i, v := range o {
-		fmt.Fprintf(&b, "%d) %s %f %s %s\n", i, v.timestamp, v.timeSinceBoot, v.containerName, v.command)
+		fmt.Fprintf(&b, "%d) %s %s %s\n", i, v.timestamp, v.containerName, v.command)
 	}
 	return b.String()
 }
@@ -294,21 +291,22 @@ func (o containerOutputList) findLastIndex(name string, command string) int {
 	return found
 }
 
-// TimeOfStart returns the time since the node boot in floating point seconds that the specified container started.
-func (o containerOutputList) TimeOfStart(name string) (float64, error) {
+// TimeOfStart returns the UNIX time in milliseconds when the specified container started.
+func (o containerOutputList) TimeOfStart(name string) (int64, error) {
 	idx := o.findIndex(name, "Starting", 0)
 	if idx == -1 {
-		return 0.0, fmt.Errorf("couldn't find that %s ever started, got\n%v", name, o)
+		return 0, fmt.Errorf("couldn't find that %s ever started, got\n%v", name, o)
 	}
-	return o[idx].timeSinceBoot, nil
+	return o[idx].timestamp.UnixMilli(), nil
 }
 
-func (o containerOutputList) TimeOfLastLoop(name string) (float64, error) {
+// TimeOfLastLoop returns the UNIX time in milliseconds when the specified container last looped.
+func (o containerOutputList) TimeOfLastLoop(name string) (int64, error) {
 	idx := o.findLastIndex(name, "Looping")
 	if idx == -1 {
-		return 0.0, fmt.Errorf("couldn't find that %s ever looped, got\n%v", name, o)
+		return 0, fmt.Errorf("couldn't find that %s ever looped, got\n%v", name, o)
 	}
-	return o[idx].timeSinceBoot, nil
+	return o[idx].timestamp.UnixMilli(), nil
 }
 
 // parseOutput combines the container log from all of the init and regular
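Note: an illustrative sketch, not part of the commit. With TimeOfStart and TimeOfLastLoop now returning UNIX milliseconds as int64, callers compare plain integer deltas against millisecond tolerances; the values below are made up.

package main

import "fmt"

func main() {
	// Assumed millisecond values of the kind TimeOfStart/TimeOfLastLoop now
	// return (made up for illustration).
	var start, lastLoop int64 = 1714566896789, 1714566928795

	// Lifetime of the container in milliseconds: ~32 s here, so a test would
	// compare it against 32000 with a tolerance such as 1000 ms.
	lifetime := lastLoop - start
	fmt.Println(lifetime)                                // 32006
	fmt.Println(lifetime >= 31000 && lifetime <= 33000)  // true
}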
@@ -337,24 +335,21 @@ func parseOutput(ctx context.Context, f *framework.Framework, pod *v1.Pod) containerOutputList {
 	var res containerOutputList
 	for sc.Scan() {
 		fields := strings.Fields(sc.Text())
-		if len(fields) < 4 {
+		if len(fields) < 3 {
 			framework.ExpectNoError(fmt.Errorf("%v should have at least length 3", fields))
 		}
-		timestamp, err := strconv.ParseInt(fields[0], 10, 64)
-		framework.ExpectNoError(err)
-		timeSinceBoot, err := strconv.ParseFloat(fields[1], 64)
+		timestamp, err := time.Parse(time.RFC3339, fields[0])
 		framework.ExpectNoError(err)
 		res = append(res, containerOutput{
-			timestamp:     time.Unix(timestamp, 0),
-			timeSinceBoot: timeSinceBoot,
-			containerName: fields[2],
-			command:       fields[3],
+			timestamp:     timestamp,
+			containerName: fields[1],
+			command:       fields[2],
 		})
 	}
 
 	// sort using the timeSinceBoot since it has more precision
 	sort.Slice(res, func(i, j int) bool {
-		return res[i].timeSinceBoot < res[j].timeSinceBoot
+		return res[i].timestamp.Before(res[j].timestamp)
 	})
 	return res
 }
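Note: an illustrative sketch, not part of the commit. Sorting by the parsed time.Time keeps the ordering of entries that fall within the same second, which second-resolution timestamps could not distinguish; the entries below are made up.

package main

import (
	"fmt"
	"sort"
	"time"
)

func main() {
	// Two made-up log entries within the same second; only the millisecond
	// part distinguishes them.
	a, _ := time.Parse(time.RFC3339, "2024-05-01T12:34:56.900Z")
	b, _ := time.Parse(time.RFC3339, "2024-05-01T12:34:56.100Z")
	entries := []time.Time{a, b}

	// Same comparison the patched parseOutput uses: time.Time.Before.
	sort.Slice(entries, func(i, j int) bool {
		return entries[i].Before(entries[j])
	})
	fmt.Println(entries[0].Format("15:04:05.000"), entries[1].Format("15:04:05.000"))
	// Output: 12:34:56.100 12:34:56.900
}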
@@ -2703,7 +2703,7 @@ var _ = SIGDescribe(nodefeature.SidecarContainers, "Containers Lifecycle", func() {
 	ps3Last, err := results.TimeOfLastLoop(prefixedName(PreStopPrefix, restartableInit3))
 	framework.ExpectNoError(err)
 
-	const simulToleration = 0.5
+	const simulToleration = 500 // milliseconds
 	// should all end together since they loop infinitely and exceed their grace period
 	gomega.Expect(ps1Last-ps2Last).To(gomega.BeNumerically("~", 0, simulToleration),
 		fmt.Sprintf("expected PostStart 1 & PostStart 2 to be killed at the same time, got %s", results))
@@ -2713,12 +2713,12 @@ var _ = SIGDescribe(nodefeature.SidecarContainers, "Containers Lifecycle", func() {
 		fmt.Sprintf("expected PostStart 2 & PostStart 3 to be killed at the same time, got %s", results))
 
 	// 30 seconds + 2 second minimum grace for the SIGKILL
-	const lifetimeToleration = 1
-	gomega.Expect(ps1Last-ps1).To(gomega.BeNumerically("~", 32, lifetimeToleration),
+	const lifetimeToleration = 1000 // milliseconds
+	gomega.Expect(ps1Last-ps1).To(gomega.BeNumerically("~", 32000, lifetimeToleration),
 		fmt.Sprintf("expected PostStart 1 to live for ~32 seconds, got %s", results))
-	gomega.Expect(ps2Last-ps2).To(gomega.BeNumerically("~", 32, lifetimeToleration),
+	gomega.Expect(ps2Last-ps2).To(gomega.BeNumerically("~", 32000, lifetimeToleration),
 		fmt.Sprintf("expected PostStart 2 to live for ~32 seconds, got %s", results))
-	gomega.Expect(ps3Last-ps3).To(gomega.BeNumerically("~", 32, lifetimeToleration),
+	gomega.Expect(ps3Last-ps3).To(gomega.BeNumerically("~", 32000, lifetimeToleration),
 		fmt.Sprintf("expected PostStart 3 to live for ~32 seconds, got %s", results))
 
 })
@@ -2835,7 +2835,7 @@ var _ = SIGDescribe(nodefeature.SidecarContainers, "Containers Lifecycle", func() {
 	ps3, err := results.TimeOfStart(prefixedName(PreStopPrefix, restartableInit3))
 	framework.ExpectNoError(err)
 
-	const toleration = 0.5
+	const toleration = 500 // milliseconds
 	gomega.Expect(ps1-ps2).To(gomega.BeNumerically("~", 0, toleration),
 		fmt.Sprintf("expected PostStart 1 & PostStart 2 to start at the same time, got %s", results))
 	gomega.Expect(ps1-ps3).To(gomega.BeNumerically("~", 0, toleration),