Merge pull request #116398 from tzneal/rework-init-containers-test

rework init containers test to remove host file dependency
commit 8ce3a2bbef
Kubernetes Prow Robot authored 2023-03-09 22:44:25 -08:00 (committed by GitHub)

@@ -21,8 +21,8 @@ import (
     "bytes"
     "context"
     "fmt"
-    "os"
-    "path/filepath"
+    "sort"
+    "strconv"
     "strings"
     "time"
@@ -54,13 +54,17 @@ type containerTestConfig struct {
 func (c containerTestConfig) Command() []string {
     var cmd bytes.Buffer
     // all outputs are in the format of:
-    // timestamp container-name message
-    fmt.Fprintf(&cmd, "echo `date +%%s` '%s Starting' >> /shared/output; ", c.Name)
-    fmt.Fprintf(&cmd, "echo `date +%%s` '%s Delaying %d' >> /shared/output; ", c.Name, c.Delay)
+    // timestamp time-since-boot container-name message
+
+    // The busybox date command doesn't support sub-second display. uptime displays in hundredths of a second, so we
+    // include both and use time since boot for relative ordering.
+    timeCmd := "`date +%s` `cat /proc/uptime | awk '{print $1}'`"
+    fmt.Fprintf(&cmd, "echo %s '%s Starting' >> /dev/termination-log; ", timeCmd, c.Name)
+    fmt.Fprintf(&cmd, "echo %s '%s Delaying %d' >> /dev/termination-log; ", timeCmd, c.Name, c.Delay)
     if c.Delay != 0 {
         fmt.Fprintf(&cmd, "sleep %d; ", c.Delay)
     }
-    fmt.Fprintf(&cmd, "echo `date +%%s` '%s Exiting' >> /shared/output; ", c.Name)
+    fmt.Fprintf(&cmd, "echo %s '%s Exiting' >> /dev/termination-log; ", timeCmd, c.Name)
     fmt.Fprintf(&cmd, "exit %d", c.ExitCode)
     return []string{"sh", "-c", cmd.String()}
 }
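
For concreteness, here is a minimal, self-contained sketch (not part of the PR; the config values are hypothetical) of the shell script Command() now builds for a container named init-1 with a 1-second delay and exit code 0:

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        name, delay, exitCode := "init-1", 1, 0
        // date +%s prints whole seconds; the first field of /proc/uptime is
        // seconds since boot with two decimal places.
        timeCmd := "`date +%s` `cat /proc/uptime | awk '{print $1}'`"

        var cmd bytes.Buffer
        fmt.Fprintf(&cmd, "echo %s '%s Starting' >> /dev/termination-log; ", timeCmd, name)
        fmt.Fprintf(&cmd, "echo %s '%s Delaying %d' >> /dev/termination-log; ", timeCmd, name, delay)
        if delay != 0 {
            fmt.Fprintf(&cmd, "sleep %d; ", delay)
        }
        fmt.Fprintf(&cmd, "echo %s '%s Exiting' >> /dev/termination-log; ", timeCmd, name)
        fmt.Fprintf(&cmd, "exit %d", exitCode)

        // Run under "sh -c", each echo appends a line such as
        //   1678337827 45930.43 init-1 Starting
        // to the container's termination log.
        fmt.Println(cmd.String())
    }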
@@ -69,17 +73,6 @@ var _ = SIGDescribe("InitContainers [NodeConformance]", func() {
     f := framework.NewDefaultFramework("initcontainers-test")
     f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 
-    var tmpDir string
-    ginkgo.BeforeEach(func() {
-        var err error
-        tmpDir, err = os.MkdirTemp("", "init-container-*")
-        framework.ExpectNoError(err, "creating temp directory")
-    })
-
-    ginkgo.AfterEach(func() {
-        os.RemoveAll(tmpDir)
-    })
-
     ginkgo.It("should launch init container serially before a regular container", func() {
         init1 := containerTestConfig{
             Name:  "init-1",
@@ -108,29 +101,32 @@ var _ = SIGDescribe("InitContainers [NodeConformance]", func() {
         /// generates an out file output like:
         //
-        // 1677116487 init-1 Starting
-        // 1677116487 init-1 Delaying 1
-        // 1677116488 init-1 Exiting
-        // 1677116489 init-2 Starting
-        // 1677116489 init-2 Delaying 1
-        // 1677116490 init-2 Exiting
-        // 1677116491 init-3 Starting
-        // 1677116491 init-3 Delaying 1
-        // 1677116492 init-3 Exiting
-        // 1677116493 regular-1 Starting
-        // 1677116493 regular-1 Delaying 1
-        // 1677116494 regular-1 Exiting
+        // 1678337827 45930.43 init-1 Starting
+        // 1678337827 45930.43 init-1 Delaying 1
+        // 1678337828 45931.43 init-1 Exiting
+        // 1678337829 45932.52 init-2 Starting
+        // 1678337829 45932.53 init-2 Delaying 1
+        // 1678337830 45933.53 init-2 Exiting
+        // 1678337831 45934.47 init-3 Starting
+        // 1678337831 45934.47 init-3 Delaying 1
+        // 1678337832 45935.47 init-3 Exiting
+        // 1678337833 45936.58 regular-1 Starting
+        // 1678337833 45936.58 regular-1 Delaying 1
+        // 1678337834 45937.58 regular-1 Exiting
 
         podSpec := getContainerOrderingPod("initcontainer-test-pod",
-            tmpDir, init1, init2, init3, regular1)
+            init1, init2, init3, regular1)
 
-        podSpec = e2epod.NewPodClient(f).Create(context.TODO(), podSpec)
+        client := e2epod.NewPodClient(f)
+        podSpec = client.Create(context.TODO(), podSpec)
 
         ginkgo.By("Waiting for the pod to finish")
         err := e2epod.WaitTimeoutForPodNoLongerRunningInNamespace(context.TODO(), f.ClientSet, podSpec.Name, podSpec.Namespace, 1*time.Minute)
         framework.ExpectNoError(err)
 
         ginkgo.By("Parsing results")
-        results := parseOutput(tmpDir)
+        podSpec, err = client.Get(context.Background(), podSpec.Name, metav1.GetOptions{})
+        framework.ExpectNoError(err)
+        results := parseOutput(podSpec)
 
         // which we then use to make assertions regarding container ordering
         ginkgo.By("Analyzing results")
@@ -159,15 +155,18 @@ var _ = SIGDescribe("InitContainers [NodeConformance]", func() {
         }
 
         podSpec := getContainerOrderingPod("initcontainer-test-pod-failure",
-            tmpDir, init1, regular1)
+            init1, regular1)
 
-        podSpec = e2epod.NewPodClient(f).Create(context.TODO(), podSpec)
+        client := e2epod.NewPodClient(f)
+        podSpec = client.Create(context.TODO(), podSpec)
 
         ginkgo.By("Waiting for the pod to fail")
         err := e2epod.WaitForPodFailedReason(context.TODO(), f.ClientSet, podSpec, "", 1*time.Minute)
         framework.ExpectNoError(err)
 
         ginkgo.By("Parsing results")
-        results := parseOutput(tmpDir)
+        podSpec, err = client.Get(context.Background(), podSpec.Name, metav1.GetOptions{})
+        framework.ExpectNoError(err)
+        results := parseOutput(podSpec)
 
         ginkgo.By("Analyzing results")
         // init container should start and exit with an error, and the regular container should never start
@@ -179,8 +178,10 @@ var _ = SIGDescribe("InitContainers [NodeConformance]", func() {
     })
 
 type containerOutput struct {
-    line          int
-    timestamp     string
+    // time the message was seen to the nearest second
+    timestamp time.Time
+    // time the message was seen since the host booted, to the nearest hundredth of a second
+    timeSinceBoot float64
     containerName string
     command       string
 }
@@ -189,7 +190,7 @@ type containerOutputList []containerOutput
 func (o containerOutputList) String() string {
     var b bytes.Buffer
     for _, v := range o {
-        fmt.Fprintf(&b, "%d %s %s %s\n", v.line, v.timestamp, v.containerName, v.command)
+        fmt.Fprintf(&b, "%s %f %s %s\n", v.timestamp, v.timeSinceBoot, v.containerName, v.command)
     }
     return b.String()
 }
@@ -253,57 +254,62 @@ func (o containerOutputList) Exits(c containerTestConfig) error {
 }
 
 func (o containerOutputList) findIndex(name string, command string) int {
-    for _, v := range o {
+    for i, v := range o {
         if v.containerName == name && v.command == command {
-            return v.line
+            return i
         }
     }
     return -1
 }
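
Returning the slice index instead of a stored line number works because parseOutput (below) sorts entries by timeSinceBoot, so slice order is execution order and an ordering assertion only needs to compare indices. A minimal sketch of such an assertion built on findIndex, shaped like the Exits method named in the hunk header (the name StartsBefore is illustrative, not necessarily the PR's exact code):

    func (o containerOutputList) StartsBefore(a, b containerTestConfig) error {
        aIdx := o.findIndex(a.Name, "Starting")
        bIdx := o.findIndex(b.Name, "Starting")
        if aIdx == -1 {
            return fmt.Errorf("couldn't find a Starting entry for %s:\n%s", a.Name, o)
        }
        if bIdx == -1 {
            return fmt.Errorf("couldn't find a Starting entry for %s:\n%s", b.Name, o)
        }
        if aIdx >= bIdx {
            return fmt.Errorf("expected %s to start before %s:\n%s", a.Name, b.Name, o)
        }
        return nil
    }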
 
-func parseOutput(dir string) containerOutputList {
-    contents, err := os.ReadFile(filepath.Join(dir, "output"))
-    framework.ExpectNoError(err, "reading output file")
+// parseOutput combines the termination log from all of the init and regular containers and parses/sorts the outputs to
+// produce an execution log
+func parseOutput(pod *v1.Pod) containerOutputList {
+    // accumulate all of our statuses
+    var statuses []v1.ContainerStatus
+    statuses = append(statuses, pod.Status.InitContainerStatuses...)
+    statuses = append(statuses, pod.Status.ContainerStatuses...)
+
+    var buf bytes.Buffer
+    for _, cs := range statuses {
+        if cs.State.Terminated != nil {
+            buf.WriteString(cs.State.Terminated.Message)
+        }
+    }
 
-    sc := bufio.NewScanner(bytes.NewReader(contents))
+    // parse
+    sc := bufio.NewScanner(&buf)
     var res containerOutputList
-    lineNo := 0
     for sc.Scan() {
-        lineNo++
         fields := strings.Fields(sc.Text())
-        if len(fields) < 3 {
-            framework.ExpectNoError(fmt.Errorf("%v should have at least length 3", fields))
+        if len(fields) < 4 {
+            framework.ExpectNoError(fmt.Errorf("%v should have at least length 4", fields))
         }
+        timestamp, err := strconv.ParseInt(fields[0], 10, 64)
+        framework.ExpectNoError(err)
+        timeSinceBoot, err := strconv.ParseFloat(fields[1], 64)
+        framework.ExpectNoError(err)
         res = append(res, containerOutput{
-            line:          lineNo,
-            timestamp:     fields[0],
-            containerName: fields[1],
-            command:       fields[2],
+            timestamp:     time.Unix(timestamp, 0),
+            timeSinceBoot: timeSinceBoot,
+            containerName: fields[2],
+            command:       fields[3],
         })
     }
+
+    // sort using the timeSinceBoot since it has more precision
+    sort.Slice(res, func(i, j int) bool {
+        return res[i].timeSinceBoot < res[j].timeSinceBoot
+    })
     return res
 }
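
To see the new flow end to end, here is a sketch of parseOutput applied to a hand-built pod status (values hypothetical): the kubelet fills State.Terminated.Message from each container's termination log, and the final sort interleaves entries from all containers into one execution log:

    pod := &v1.Pod{
        Status: v1.PodStatus{
            InitContainerStatuses: []v1.ContainerStatus{{
                Name: "init-1",
                State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{
                    Message: "1678337827 45930.43 init-1 Starting\n" +
                        "1678337828 45931.43 init-1 Exiting\n",
                }},
            }},
            ContainerStatuses: []v1.ContainerStatus{{
                Name: "regular-1",
                State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{
                    Message: "1678337833 45936.58 regular-1 Starting\n",
                }},
            }},
        },
    }
    results := parseOutput(pod)
    // results is sorted by timeSinceBoot:
    //   init-1 Starting, init-1 Exiting, regular-1 Starting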
 
-func getContainerOrderingPod(podName string, hostDir string, containerConfigs ...containerTestConfig) *v1.Pod {
-    // all the pods share the given host directory
-    hostPathDirectory := v1.HostPathDirectory
+func getContainerOrderingPod(podName string, containerConfigs ...containerTestConfig) *v1.Pod {
     p := &v1.Pod{
         ObjectMeta: metav1.ObjectMeta{
             Name: podName,
         },
         Spec: v1.PodSpec{
             RestartPolicy: v1.RestartPolicyNever,
-            Volumes: []v1.Volume{
-                {
-                    Name: "shared",
-                    VolumeSource: v1.VolumeSource{
-                        HostPath: &v1.HostPathVolumeSource{
-                            Path: hostDir,
-                            Type: &hostPathDirectory,
-                        },
-                    },
-                },
-            },
         },
     }
@@ -323,12 +329,6 @@ func getContainerOrderingPod(podName string, hostDir string, containerConfigs ..
                 v1.ResourceMemory: resource.MustParse("15Mi"),
             },
         },
-        VolumeMounts: []v1.VolumeMount{
-            {
-                Name:      "shared",
-                MountPath: "/shared",
-            },
-        },
     }
 
     switch cc.Type {
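
The rework hinges on the containers writing to /dev/termination-log, the kubelet's default terminationMessagePath: on container exit the kubelet reads that file (capped at 4096 bytes per container) into ContainerStatus.State.Terminated.Message, which is exactly what parseOutput consumes. A sketch of a per-container spec with those defaults spelled out explicitly (the image helper is an assumption; the real container construction is in the omitted body of this function):

    container := v1.Container{
        Name:    cc.Name,
        Image:   imageutils.GetE2EImage(imageutils.BusyBox), // assumption: a busybox-style image
        Command: cc.Command(),
        // Kubelet defaults, shown explicitly: the file the test containers
        // append to is read back as the termination message.
        TerminationMessagePath:   "/dev/termination-log",
        TerminationMessagePolicy: v1.TerminationMessageReadFile,
    }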