rework init containers test to remove host file dependency

Since we can't rely on the test runner and the hosts under test being on the
same machine, we write to the termination log from each container and
concatenate the results.
Todd Neal 2023-03-08 23:17:17 -06:00
parent bbe0eb7595
commit 78ca93e39c
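As a rough sketch of the mechanism this change relies on: anything a container writes to its termination message path (/dev/termination-log by default) is surfaced by the kubelet in that container's terminated state, so the test can reassemble every container's output from the pod status alone instead of from a shared host file. The helper below is illustrative, not part of the diff, and assumes the usual imports (bytes and v1 "k8s.io/api/core/v1").

// Illustrative sketch: gather the termination messages written by each
// container into one buffer, in init-then-regular order.
func collectTerminationMessages(pod *v1.Pod) string {
	var buf bytes.Buffer
	statuses := append([]v1.ContainerStatus{}, pod.Status.InitContainerStatuses...)
	statuses = append(statuses, pod.Status.ContainerStatuses...)
	for _, cs := range statuses {
		if cs.State.Terminated != nil {
			// The kubelet copies /dev/termination-log into this field once
			// the container exits.
			buf.WriteString(cs.State.Terminated.Message)
		}
	}
	return buf.String()
}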


@@ -21,8 +21,8 @@ import (
"bytes"
"context"
"fmt"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
@@ -54,13 +54,17 @@ type containerTestConfig struct {
func (c containerTestConfig) Command() []string {
var cmd bytes.Buffer
// all outputs are in the format of:
// timestamp container-name message
fmt.Fprintf(&cmd, "echo `date +%%s` '%s Starting' >> /shared/output; ", c.Name)
fmt.Fprintf(&cmd, "echo `date +%%s` '%s Delaying %d' >> /shared/output; ", c.Name, c.Delay)
// timestamp time-since-boot container-name message
// The busybox time command doesn't support sub-second display. uptime displays in hundredths of a second, so we
// include both and use time since boot for relative ordering
timeCmd := "`date +%s` `cat /proc/uptime | awk '{print $1}'`"
fmt.Fprintf(&cmd, "echo %s '%s Starting' >> /dev/termination-log; ", timeCmd, c.Name)
fmt.Fprintf(&cmd, "echo %s '%s Delaying %d' >> /dev/termination-log; ", timeCmd, c.Name, c.Delay)
if c.Delay != 0 {
fmt.Fprintf(&cmd, "sleep %d; ", c.Delay)
}
fmt.Fprintf(&cmd, "echo `date +%%s` '%s Exiting' >> /shared/output; ", c.Name)
fmt.Fprintf(&cmd, "echo %s '%s Exiting' >> /dev/termination-log; ", timeCmd, c.Name)
fmt.Fprintf(&cmd, "exit %d", c.ExitCode)
return []string{"sh", "-c", cmd.String()}
}
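For a hypothetical config such as containerTestConfig{Name: "init-1", Delay: 1, ExitCode: 0} (values made up for illustration), the command built above comes out roughly as follows:

// Illustrative usage of Command() with a made-up config.
cfg := containerTestConfig{Name: "init-1", Delay: 1, ExitCode: 0}
argv := cfg.Command()
// argv is {"sh", "-c", script}, where script is approximately
// (line breaks added for readability):
//
//   echo `date +%s` `cat /proc/uptime | awk '{print $1}'` 'init-1 Starting' >> /dev/termination-log;
//   echo `date +%s` `cat /proc/uptime | awk '{print $1}'` 'init-1 Delaying 1' >> /dev/termination-log;
//   sleep 1;
//   echo `date +%s` `cat /proc/uptime | awk '{print $1}'` 'init-1 Exiting' >> /dev/termination-log;
//   exit 0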
@@ -69,17 +73,6 @@ var _ = SIGDescribe("InitContainers [NodeConformance]", func() {
f := framework.NewDefaultFramework("initcontainers-test")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
var tmpDir string
ginkgo.BeforeEach(func() {
var err error
tmpDir, err = os.MkdirTemp("", "init-container-*")
framework.ExpectNoError(err, "creating temp directory")
})
ginkgo.AfterEach(func() {
os.RemoveAll(tmpDir)
})
ginkgo.It("should launch init container serially before a regular container", func() {
init1 := containerTestConfig{
Name: "init-1",
@@ -108,29 +101,32 @@ var _ = SIGDescribe("InitContainers [NodeConformance]", func() {
// generates output like:
//
// 1677116487 init-1 Starting
// 1677116487 init-1 Delaying 1
// 1677116488 init-1 Exiting
// 1677116489 init-2 Starting
// 1677116489 init-2 Delaying 1
// 1677116490 init-2 Exiting
// 1677116491 init-3 Starting
// 1677116491 init-3 Delaying 1
// 1677116492 init-3 Exiting
// 1677116493 regular-1 Starting
// 1677116493 regular-1 Delaying 1
// 1677116494 regular-1 Exiting
// 1678337827 45930.43 init-1 Starting
// 1678337827 45930.43 init-1 Delaying 1
// 1678337828 45931.43 init-1 Exiting
// 1678337829 45932.52 init-2 Starting
// 1678337829 45932.53 init-2 Delaying 1
// 1678337830 45933.53 init-2 Exiting
// 1678337831 45934.47 init-3 Starting
// 1678337831 45934.47 init-3 Delaying 1
// 1678337832 45935.47 init-3 Exiting
// 1678337833 45936.58 regular-1 Starting
// 1678337833 45936.58 regular-1 Delaying 1
// 1678337834 45937.58 regular-1 Exiting
podSpec := getContainerOrderingPod("initcontainer-test-pod",
tmpDir, init1, init2, init3, regular1)
init1, init2, init3, regular1)
podSpec = e2epod.NewPodClient(f).Create(context.TODO(), podSpec)
client := e2epod.NewPodClient(f)
podSpec = client.Create(context.TODO(), podSpec)
ginkgo.By("Waiting for the pod to finish")
err := e2epod.WaitTimeoutForPodNoLongerRunningInNamespace(context.TODO(), f.ClientSet, podSpec.Name, podSpec.Namespace, 1*time.Minute)
framework.ExpectNoError(err)
ginkgo.By("Parsing results")
results := parseOutput(tmpDir)
podSpec, err = client.Get(context.Background(), podSpec.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
results := parseOutput(podSpec)
// which we then use to make assertions regarding container ordering
ginkgo.By("Analyzing results")
@@ -159,15 +155,18 @@ var _ = SIGDescribe("InitContainers [NodeConformance]", func() {
}
podSpec := getContainerOrderingPod("initcontainer-test-pod-failure",
tmpDir, init1, regular1)
init1, regular1)
podSpec = e2epod.NewPodClient(f).Create(context.TODO(), podSpec)
client := e2epod.NewPodClient(f)
podSpec = client.Create(context.TODO(), podSpec)
ginkgo.By("Waiting for the pod to fail")
err := e2epod.WaitForPodFailedReason(context.TODO(), f.ClientSet, podSpec, "", 1*time.Minute)
framework.ExpectNoError(err)
ginkgo.By("Parsing results")
results := parseOutput(tmpDir)
podSpec, err = client.Get(context.Background(), podSpec.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
results := parseOutput(podSpec)
ginkgo.By("Analyzing results")
// init container should start and exit with an error, and the regular container should never start
@@ -179,8 +178,10 @@ var _ = SIGDescribe("InitContainers [NodeConformance]", func() {
})
type containerOutput struct {
line int
timestamp string
// time the message was seen to the nearest second
timestamp time.Time
// time the message was seen since the host booted, to the nearest hundredth of a second
timeSinceBoot float64
containerName string
command string
}
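Taking the first line of the example output shown earlier, the parsed fields map onto this struct roughly as follows:

// Illustrative only, using the first example line from above:
//
//   "1678337827 45930.43 init-1 Starting"
//
// parses into:
//
//   containerOutput{
//       timestamp:     time.Unix(1678337827, 0),
//       timeSinceBoot: 45930.43,
//       containerName: "init-1",
//       command:       "Starting",
//   }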
@@ -189,7 +190,7 @@ type containerOutputList []containerOutput
func (o containerOutputList) String() string {
var b bytes.Buffer
for _, v := range o {
fmt.Fprintf(&b, "%d %s %s %s\n", v.line, v.timestamp, v.containerName, v.command)
fmt.Fprintf(&b, "%s %f %s %s\n", v.timestamp, v.timeSinceBoot, v.containerName, v.command)
}
return b.String()
}
@@ -253,57 +254,62 @@ func (o containerOutputList) Exits(c containerTestConfig) error {
}
func (o containerOutputList) findIndex(name string, command string) int {
for _, v := range o {
for i, v := range o {
if v.containerName == name && v.command == command {
return v.line
return i
}
}
return -1
}
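The ordering assertions (such as Exits above) are built on top of findIndex; a hypothetical helper in the same style, with the name and error messages invented for illustration, might look like this:

// Hypothetical sketch, not taken from the test: checks that container a
// logged "Starting" before container b did.
func (o containerOutputList) startsBefore(a, b containerTestConfig) error {
	aIdx := o.findIndex(a.Name, "Starting")
	bIdx := o.findIndex(b.Name, "Starting")
	if aIdx == -1 {
		return fmt.Errorf("couldn't find that %s ever started, got\n%v", a.Name, o)
	}
	if bIdx == -1 {
		return fmt.Errorf("couldn't find that %s ever started, got\n%v", b.Name, o)
	}
	if aIdx >= bIdx {
		return fmt.Errorf("expected %s to start before %s, got\n%v", a.Name, b.Name, o)
	}
	return nil
}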
func parseOutput(dir string) containerOutputList {
contents, err := os.ReadFile(filepath.Join(dir, "output"))
framework.ExpectNoError(err, "reading output file")
// parseOutput combines the termination log from all of the init and regular containers and parses/sorts the outputs to
// produce an execution log
func parseOutput(pod *v1.Pod) containerOutputList {
// accumulate all of our statuses
var statuses []v1.ContainerStatus
statuses = append(statuses, pod.Status.InitContainerStatuses...)
statuses = append(statuses, pod.Status.ContainerStatuses...)
var buf bytes.Buffer
for _, cs := range statuses {
if cs.State.Terminated != nil {
buf.WriteString(cs.State.Terminated.Message)
}
}
sc := bufio.NewScanner(bytes.NewReader(contents))
// parse
sc := bufio.NewScanner(&buf)
var res containerOutputList
lineNo := 0
for sc.Scan() {
lineNo++
fields := strings.Fields(sc.Text())
if len(fields) < 3 {
if len(fields) < 4 {
framework.ExpectNoError(fmt.Errorf("%v should have at least length 4", fields))
}
timestamp, err := strconv.ParseInt(fields[0], 10, 64)
framework.ExpectNoError(err)
timeSinceBoot, err := strconv.ParseFloat(fields[1], 64)
framework.ExpectNoError(err)
res = append(res, containerOutput{
line: lineNo,
timestamp: fields[0],
containerName: fields[1],
command: fields[2],
timestamp: time.Unix(timestamp, 0),
timeSinceBoot: timeSinceBoot,
containerName: fields[2],
command: fields[3],
})
}
// sort using the timeSinceBoot since it has more precision
sort.Slice(res, func(i, j int) bool {
return res[i].timeSinceBoot < res[j].timeSinceBoot
})
return res
}
func getContainerOrderingPod(podName string, hostDir string, containerConfigs ...containerTestConfig) *v1.Pod {
// all the pods share the given host directory
hostPathDirectory := v1.HostPathDirectory
func getContainerOrderingPod(podName string, containerConfigs ...containerTestConfig) *v1.Pod {
p := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "shared",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: hostDir,
Type: &hostPathDirectory,
},
},
},
},
},
}
@@ -323,12 +329,6 @@ func getContainerOrderingPod(podName string, hostDir string, containerConfigs ..
v1.ResourceMemory: resource.MustParse("15Mi"),
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "shared",
MountPath: "/shared",
},
},
}
switch cc.Type {