Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-28 05:57:25 +00:00)
Use container log instead of termination log
Since the termination log cannot be accessed until the container is terminated, use the container log.
parent 6c578bc982
commit 1fb4eee94e
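
For context on the fix: the kubelet copies /dev/termination-log into the container status only after the container terminates, while the container's log stream can be read through the API server at any time. A minimal sketch of the approach the diff below adopts (fetchContainerLog is a hypothetical name; the e2epod helpers are the same ones the commit itself uses):

package e2enode

import (
	"context"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// fetchContainerLog reads the current log of a container via the API server.
// Unlike the termination message, this works while the container is still
// executing, which is what the tests below rely on.
func fetchContainerLog(ctx context.Context, f *framework.Framework, podName, containerName string) (string, error) {
	return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, containerName)
}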
@@ -19,6 +19,7 @@ package e2enode
 import (
 	"bufio"
 	"bytes"
+	"context"
 	"fmt"
 	"sort"
 	"strconv"
@@ -29,6 +30,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"

 	"k8s.io/kubernetes/test/e2e/framework"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )

 type execCommand struct {
@@ -41,11 +43,17 @@ type execCommand struct {
 	// TerminationSeconds is the time it takes for the container before
 	// terminating if it catches SIGTERM.
 	TerminationSeconds int
+	// ContainerName is the name of the container to append the log. If empty,
+	// the name specified in ExecCommand will be used.
+	ContainerName string
 }

-// ExecCommand returns the command to execute in the container that implements execCommand and logs activities to a container
-// specific log that persists across container restarts. The final log is written to /dev/termination-log so it can
-// be retrieved by the test harness after the container execution.
+// ExecCommand returns the command to execute in the container that implements
+// execCommand and logs activities to a container specific log that persists
+// across container restarts. The final log is written to container log so it
+// can be retrieved by the test harness during the container execution.
+// Log to /proc/1/fd/1 so that the lifecycle hook handler logs are captured as
+// well.
 func ExecCommand(name string, c execCommand) []string {
 	var cmd bytes.Buffer
 	// all outputs are in the format of:
@@ -54,25 +62,31 @@ func ExecCommand(name string, c execCommand) []string {
 	// The busybox time command doesn't support sub-second display. uptime displays in hundredths of a second, so we
 	// include both and use time since boot for relative ordering of file entries
 	timeCmd := "`date +%s` `cat /proc/uptime | awk '{print $1}'`"
-	containerLog := fmt.Sprintf("/persistent/%s.log", name)
+	containerName := name
+	if c.ContainerName != "" {
+		containerName = c.ContainerName
+	}
+	containerLog := fmt.Sprintf("/persistent/%s.log", containerName)

 	fmt.Fprintf(&cmd, "touch %s; ", containerLog)
-	fmt.Fprintf(&cmd, "cat %s >> /dev/termination-log; ", containerLog)
+	if c.ContainerName == "" {
+		fmt.Fprintf(&cmd, "cat %s >> /proc/1/fd/1; ", containerLog)
+	}

-	fmt.Fprintf(&cmd, "echo %s '%s Starting %d' | tee -a %s >> /dev/termination-log; ", timeCmd, name, c.StartDelay, containerLog)
-	fmt.Fprintf(&cmd, "_term() { sleep %d; echo %s '%s Exiting' | tee -a %s >> /dev/termination-log; exit %d; }; ", c.TerminationSeconds, timeCmd, name, containerLog, c.ExitCode)
+	fmt.Fprintf(&cmd, "echo %s '%s Starting %d' | tee -a %s >> /proc/1/fd/1; ", timeCmd, name, c.StartDelay, containerLog)
+	fmt.Fprintf(&cmd, "_term() { sleep %d; echo %s '%s Exiting' | tee -a %s >> /proc/1/fd/1; exit %d; }; ", c.TerminationSeconds, timeCmd, name, containerLog, c.ExitCode)
 	fmt.Fprintf(&cmd, "trap _term TERM; ")
 	if c.StartDelay != 0 {
 		fmt.Fprint(&cmd, sleepCommand(c.StartDelay))
 	}
 	// You can check started file to see if the container has started
 	fmt.Fprintf(&cmd, "touch started; ")
-	fmt.Fprintf(&cmd, "echo %s '%s Started' | tee -a %s >> /dev/termination-log; ", timeCmd, name, containerLog)
-	fmt.Fprintf(&cmd, "echo %s '%s Delaying %d' | tee -a %s >> /dev/termination-log; ", timeCmd, name, c.Delay, containerLog)
+	fmt.Fprintf(&cmd, "echo %s '%s Started' | tee -a %s >> /proc/1/fd/1; ", timeCmd, name, containerLog)
+	fmt.Fprintf(&cmd, "echo %s '%s Delaying %d' | tee -a %s >> /proc/1/fd/1; ", timeCmd, name, c.Delay, containerLog)
 	if c.Delay != 0 {
 		fmt.Fprint(&cmd, sleepCommand(c.Delay))
 	}
-	fmt.Fprintf(&cmd, "echo %s '%s Exiting' | tee -a %s >> /dev/termination-log; ", timeCmd, name, containerLog)
+	fmt.Fprintf(&cmd, "echo %s '%s Exiting' | tee -a %s >> /proc/1/fd/1; ", timeCmd, name, containerLog)
 	fmt.Fprintf(&cmd, "exit %d", c.ExitCode)
 	return []string{"sh", "-c", cmd.String()}
 }
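
To see how the new ContainerName field is meant to be used, here is a hedged sketch of a container spec in the style of the tests later in this diff (regular1, prefixedName, PostStartPrefix, and busyboxImage are assumed to exist as they do in this suite). The PostStart hook names regular1 as its ContainerName, so the hook's ExecCommand appends to /persistent/regular1.log instead of creating its own file, and skips re-catting the log to /proc/1/fd/1 since the main command already does that:

container := v1.Container{
	Name:    regular1,
	Image:   busyboxImage,
	Command: ExecCommand(regular1, execCommand{Delay: 10, ExitCode: 0}),
	Lifecycle: &v1.Lifecycle{
		PostStart: &v1.LifecycleHandler{
			Exec: &v1.ExecAction{
				// Hook output lands in the main container's log file, so
				// parseOutput sees hook and container events in one stream.
				Command: ExecCommand(prefixedName(PostStartPrefix, regular1), execCommand{
					Delay:         1,
					ExitCode:      0,
					ContainerName: regular1,
				}),
			},
		},
	},
}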
@@ -111,18 +125,18 @@ func (o containerOutputList) RunTogether(lhs, rhs string) error {
 	rhsFinish := o.findIndex(rhs, "Finishing", 0)

 	if lhsStart == -1 {
-		return fmt.Errorf("couldn't find that %s ever started, got %v", lhs, o)
+		return fmt.Errorf("couldn't find that %s ever started, got\n%v", lhs, o)
 	}
 	if rhsStart == -1 {
-		return fmt.Errorf("couldn't find that %s ever started, got %v", rhs, o)
+		return fmt.Errorf("couldn't find that %s ever started, got\n%v", rhs, o)
 	}

 	if lhsFinish != -1 && rhsStart > lhsFinish {
-		return fmt.Errorf("expected %s to start before finishing %s, got %v", rhs, lhs, o)
+		return fmt.Errorf("expected %s to start before finishing %s, got\n%v", rhs, lhs, o)
 	}

 	if rhsFinish != -1 && lhsStart > rhsFinish {
-		return fmt.Errorf("expected %s to start before finishing %s, got %v", lhs, rhs, o)
+		return fmt.Errorf("expected %s to start before finishing %s, got\n%v", lhs, rhs, o)
 	}

 	return nil
@@ -133,14 +147,14 @@ func (o containerOutputList) StartsBefore(lhs, rhs string) error {
 	lhsStart := o.findIndex(lhs, "Started", 0)

 	if lhsStart == -1 {
-		return fmt.Errorf("couldn't find that %s ever started, got %v", lhs, o)
+		return fmt.Errorf("couldn't find that %s ever started, got\n%v", lhs, o)
 	}

 	// this works even for the same names (restart case)
 	rhsStart := o.findIndex(rhs, "Starting", lhsStart+1)

 	if rhsStart == -1 {
-		return fmt.Errorf("couldn't find that %s started after %s, got %v", rhs, lhs, o)
+		return fmt.Errorf("couldn't find that %s started after %s, got\n%v", rhs, lhs, o)
 	}
 	return nil
 }
@@ -150,14 +164,14 @@ func (o containerOutputList) DoesntStartAfter(lhs, rhs string) error {
 	rhsStart := o.findIndex(rhs, "Starting", 0)

 	if rhsStart == -1 {
-		return fmt.Errorf("couldn't find that %s ever started, got %v", rhs, o)
+		return fmt.Errorf("couldn't find that %s ever started, got\n%v", rhs, o)
 	}

 	// this works even for the same names (restart case)
 	lhsStart := o.findIndex(lhs, "Started", rhsStart+1)

 	if lhsStart != -1 {
-		return fmt.Errorf("expected %s to not start after %s, got %v", lhs, rhs, o)
+		return fmt.Errorf("expected %s to not start after %s, got\n%v", lhs, rhs, o)
 	}

 	return nil
@@ -168,14 +182,14 @@ func (o containerOutputList) ExitsBefore(lhs, rhs string) error {
 	lhsExit := o.findIndex(lhs, "Exiting", 0)

 	if lhsExit == -1 {
-		return fmt.Errorf("couldn't find that %s ever exited, got %v", lhs, o)
+		return fmt.Errorf("couldn't find that %s ever exited, got\n%v", lhs, o)
 	}

 	// this works even for the same names (restart case)
 	rhsExit := o.findIndex(rhs, "Starting", lhsExit+1)

 	if rhsExit == -1 {
-		return fmt.Errorf("couldn't find that %s starting before %s exited, got %v", rhs, lhs, o)
+		return fmt.Errorf("couldn't find that %s starting before %s exited, got\n%v", rhs, lhs, o)
 	}
 	return nil
 }
@@ -183,7 +197,7 @@ func (o containerOutputList) ExitsBefore(lhs, rhs string) error {
 // Starts returns an error if the container was not found to have started
 func (o containerOutputList) Starts(name string) error {
 	if idx := o.findIndex(name, "Started", 0); idx == -1 {
-		return fmt.Errorf("couldn't find that %s ever started, got %v", name, o)
+		return fmt.Errorf("couldn't find that %s ever started, got\n%v", name, o)
 	}
 	return nil
 }
@@ -191,7 +205,7 @@ func (o containerOutputList) Starts(name string) error {
 // DoesntStart returns an error if the container was found to have started
 func (o containerOutputList) DoesntStart(name string) error {
 	if idx := o.findIndex(name, "Started", 0); idx != -1 {
-		return fmt.Errorf("find %s started, but didn't expect to, got %v", name, o)
+		return fmt.Errorf("find %s started, but didn't expect to, got\n%v", name, o)
 	}
 	return nil
 }
@@ -199,7 +213,7 @@ func (o containerOutputList) DoesntStart(name string) error {
 // Exits returns an error if the container was not found to have exited
 func (o containerOutputList) Exits(name string) error {
 	if idx := o.findIndex(name, "Exiting", 0); idx == -1 {
-		return fmt.Errorf("couldn't find that %s ever exited, got %v", name, o)
+		return fmt.Errorf("couldn't find that %s ever exited, got\n%v", name, o)
 	}
 	return nil
 }
@@ -208,13 +222,13 @@ func (o containerOutputList) Exits(name string) error {
 func (o containerOutputList) HasRestarted(name string) error {
 	idx := o.findIndex(name, "Starting", 0)
 	if idx == -1 {
-		return fmt.Errorf("couldn't find that %s ever started, got %v", name, o)
+		return fmt.Errorf("couldn't find that %s ever started, got\n%v", name, o)
 	}

 	idx = o.findIndex(name, "Starting", idx+1)

 	if idx == -1 {
-		return fmt.Errorf("couldn't find that %s ever restarted, got %v", name, o)
+		return fmt.Errorf("couldn't find that %s ever restarted, got\n%v", name, o)
 	}

 	return nil
@@ -224,13 +238,13 @@ func (o containerOutputList) HasRestarted(name string) error {
 func (o containerOutputList) HasNotRestarted(name string) error {
 	idx := o.findIndex(name, "Starting", 0)
 	if idx == -1 {
-		return fmt.Errorf("couldn't find that %s ever started, got %v", name, o)
+		return fmt.Errorf("couldn't find that %s ever started, got\n%v", name, o)
 	}

 	idx = o.findIndex(name, "Starting", idx+1)

 	if idx != -1 {
-		return fmt.Errorf("found that %s restarted but wasn't expected to, got %v", name, o)
+		return fmt.Errorf("found that %s restarted but wasn't expected to, got\n%v", name, o)
 	}

 	return nil
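
The only change across these assertion helpers is in the error format: `got %v` becomes `got\n%v`, so the dumped execution log begins on its own line instead of running into the message. Illustratively (log lines and values invented for the example), a failure now prints roughly as:

couldn't find that init1 ever started, got
1690000000 101.20 regular1 Starting 0
1690000001 102.21 regular1 Started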
@@ -265,23 +279,25 @@ func (o containerOutputList) findIndex(name string, command string, startIdx int
 	return -1
 }

-// parseOutput combines the termination log from all of the init and regular containers and parses/sorts the outputs to
-// produce an execution log
-func parseOutput(pod *v1.Pod) containerOutputList {
+// parseOutput combines the container log from all of the init and regular
+// containers and parses/sorts the outputs to produce an execution log
+func parseOutput(ctx context.Context, f *framework.Framework, pod *v1.Pod) containerOutputList {
 	// accumulate all of our statuses
 	var statuses []v1.ContainerStatus
 	statuses = append(statuses, pod.Status.InitContainerStatuses...)
 	statuses = append(statuses, pod.Status.ContainerStatuses...)

 	var buf bytes.Buffer
 	for _, cs := range statuses {
-		// If the container is terminated but the reason is ContainerStatusUnknown,
-		// it means that the kubelet has overwritten the termination message. Read
-		// the LastTerminationState instead.
-		if cs.State.Terminated != nil && cs.State.Terminated.Reason != "ContainerStatusUnknown" {
-			buf.WriteString(cs.State.Terminated.Message)
-		} else if cs.LastTerminationState.Terminated != nil {
-			buf.WriteString(cs.LastTerminationState.Terminated.Message)
+		log, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, cs.Name)
+		if err != nil {
+			framework.Logf("error getting logs for %s: %v", cs.Name, err)
+			log, err = e2epod.GetPreviousPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, cs.Name)
+			if err != nil {
+				framework.Logf("error getting previous logs for %s: %v", cs.Name, err)
+			}
 		}
+		buf.WriteString(log)
 	}

 	// parse
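
Between the helper changes above and the call-site updates below, it helps to recall the line format every container and hook writes (see ExecCommand): a wall-clock epoch, a boot-relative uptime used for relative ordering, the name, and the event. Illustrative lines of the stream parseOutput now assembles from container logs (values invented):

1690000000 101.20 init1 Starting 0
1690000001 102.21 init1 Started
1690000001 102.21 init1 Delaying 1
1690000002 103.22 init1 Exiting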
@@ -142,7 +142,7 @@ var _ = SIGDescribe("[NodeConformance] Containers Lifecycle ", func() {
 		ginkgo.By("Parsing results")
 		podSpec, err = client.Get(context.Background(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results := parseOutput(podSpec)
+		results := parseOutput(context.TODO(), f, podSpec)

 		// which we then use to make assertions regarding container ordering
 		ginkgo.By("Analyzing results")
@@ -201,7 +201,7 @@ var _ = SIGDescribe("[NodeConformance] Containers Lifecycle ", func() {
 		ginkgo.By("Parsing results")
 		podSpec, err = client.Get(context.Background(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results := parseOutput(podSpec)
+		results := parseOutput(context.TODO(), f, podSpec)

 		ginkgo.By("Analyzing results")
 		// init container should start and exit with an error, and the regular container should never start
@@ -243,8 +243,9 @@ var _ = SIGDescribe("[NodeConformance] Containers Lifecycle ", func() {
 					PostStart: &v1.LifecycleHandler{
 						Exec: &v1.ExecAction{
 							Command: ExecCommand(prefixedName(PostStartPrefix, regular1), execCommand{
-								Delay:    1,
-								ExitCode: 0,
+								Delay:         1,
+								ExitCode:      0,
+								ContainerName: regular1,
 							}),
 						},
 					},
@@ -265,7 +266,7 @@ var _ = SIGDescribe("[NodeConformance] Containers Lifecycle ", func() {
 		ginkgo.By("Parsing results")
 		podSpec, err = client.Get(context.Background(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results := parseOutput(podSpec)
+		results := parseOutput(context.TODO(), f, podSpec)

 		ginkgo.By("Analyzing results")
 		// init container should start and exit with an error, and the regular container should never start
@@ -309,7 +310,7 @@ var _ = SIGDescribe("[NodeConformance] Containers Lifecycle ", func() {
 		ginkgo.By("Parsing results")
 		podSpec, err = client.Get(context.Background(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results := parseOutput(podSpec)
+		results := parseOutput(context.TODO(), f, podSpec)

 		ginkgo.By("Analyzing results")
 		// container must be restarted
@@ -341,8 +342,9 @@ var _ = SIGDescribe("[NodeConformance] Containers Lifecycle ", func() {
 					PostStart: &v1.LifecycleHandler{
 						Exec: &v1.ExecAction{
 							Command: ExecCommand(prefixedName(PostStartPrefix, regular1), execCommand{
-								Delay:    1,
-								ExitCode: 0,
+								Delay:         1,
+								ExitCode:      0,
+								ContainerName: regular1,
 							}),
 						},
 					},
@@ -371,7 +373,7 @@ var _ = SIGDescribe("[NodeConformance] Containers Lifecycle ", func() {
 		ginkgo.By("Parsing results")
 		podSpec, err = client.Get(context.Background(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results := parseOutput(podSpec)
+		results := parseOutput(context.TODO(), f, podSpec)

 		ginkgo.By("Analyzing results")
 		// second container should not start before the PostStart of a first container completed
@@ -427,7 +429,7 @@ var _ = SIGDescribe("[NodeConformance] Containers Lifecycle ", func() {

 		podSpec, err = client.Get(context.Background(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results = parseOutput(podSpec)
+		results = parseOutput(context.TODO(), f, podSpec)
 	})
 	ginkgo.It("should not start an init container", func() {
 		framework.ExpectNoError(results.DoesntStart(init1))
@@ -500,7 +502,7 @@ var _ = SIGDescribe("[NodeConformance] Containers Lifecycle ", func() {
 		ginkgo.By("Parsing results")
 		podSpec, err = client.Get(context.Background(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results := parseOutput(podSpec)
+		results := parseOutput(context.TODO(), f, podSpec)

 		ginkgo.By("Analyzing results")
 		framework.ExpectNoError(results.StartsBefore(init1, init2))
@@ -635,8 +637,9 @@ var _ = SIGDescribe("[NodeConformance] Containers Lifecycle ", func() {
 					PreStop: &v1.LifecycleHandler{
 						Exec: &v1.ExecAction{
 							Command: ExecCommand(prefixedName(PreStopPrefix, regular1), execCommand{
-								Delay:    1,
-								ExitCode: 0,
+								Delay:         1,
+								ExitCode:      0,
+								ContainerName: regular1,
 							}),
 						},
 					},
@@ -658,7 +661,7 @@ var _ = SIGDescribe("[NodeConformance] Containers Lifecycle ", func() {
 		ginkgo.By("Parsing results")
 		podSpec, err = client.Get(context.TODO(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results := parseOutput(podSpec)
+		results := parseOutput(context.TODO(), f, podSpec)

 		ginkgo.By("Analyzing results")
 		framework.ExpectNoError(results.RunTogether(regular1, prefixedName(PreStopPrefix, regular1)))
@@ -701,8 +704,9 @@ var _ = SIGDescribe("[NodeConformance] Containers Lifecycle ", func() {
 					PreStop: &v1.LifecycleHandler{
 						Exec: &v1.ExecAction{
 							Command: ExecCommand(prefixedName(PreStopPrefix, regular1), execCommand{
-								Delay:    1,
-								ExitCode: 0,
+								Delay:         1,
+								ExitCode:      0,
+								ContainerName: regular1,
 							}),
 						},
 					},
@@ -724,7 +728,7 @@ var _ = SIGDescribe("[NodeConformance] Containers Lifecycle ", func() {
 		ginkgo.By("Parsing results")
 		podSpec, err = client.Get(context.TODO(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results := parseOutput(podSpec)
+		results := parseOutput(context.TODO(), f, podSpec)

 		ginkgo.By("Analyzing results")
 		framework.ExpectNoError(results.RunTogether(regular1, prefixedName(PreStopPrefix, regular1)))
@@ -842,7 +846,7 @@ var _ = SIGDescribe("[Serial] Containers Lifecycle ", func() {
 		ginkgo.By("Parsing results")
 		pod, err = client.Get(ctx, pod.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results := parseOutput(pod)
+		results := parseOutput(context.TODO(), f, pod)

 		ginkgo.By("Analyzing results")
 		init1Started, err := results.FindIndex(init1, "Started", 0)
@@ -962,7 +966,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "

 		podSpec, err := client.Get(context.Background(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results = parseOutput(podSpec)
+		results = parseOutput(context.TODO(), f, podSpec)
 	})

 	ginkgo.It("should run the first init container to completion before starting first restartable init container", func() {
@@ -1052,7 +1056,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "

 		podSpec, err := client.Get(context.TODO(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results = parseOutput(podSpec)
+		results = parseOutput(context.TODO(), f, podSpec)
 	})
 	ginkgo.It("should not restart a restartable init container", func() {
 		framework.ExpectNoError(results.DoesntStartAfter(restartableInit1, regular1))
@@ -1110,7 +1114,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "

 		podSpec, err = client.Get(context.Background(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results = parseOutput(podSpec)
+		results = parseOutput(context.TODO(), f, podSpec)
 	})
 	ginkgo.It("should not start a restartable init container", func() {
 		framework.ExpectNoError(results.DoesntStart(restartableInit1))
@@ -1178,7 +1182,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "

 		podSpec, err := client.Get(context.TODO(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results = parseOutput(podSpec)
+		results = parseOutput(context.TODO(), f, podSpec)
 	})
 	ginkgo.It("should restart a restartable init container before the regular container started", func() {
 		framework.ExpectNoError(results.StartsBefore(restartableInit1, regular1))
@@ -1246,7 +1250,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "

 		podSpec, err := client.Get(context.TODO(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results = parseOutput(podSpec)
+		results = parseOutput(context.TODO(), f, podSpec)
 	})
 	ginkgo.It("should restart a restartable init container before the regular container started", func() {
 		framework.ExpectNoError(results.StartsBefore(restartableInit1, regular1))
@@ -1315,7 +1319,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "

 		podSpec, err := client.Get(context.TODO(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results = parseOutput(podSpec)
+		results = parseOutput(context.TODO(), f, podSpec)
 	})
 	ginkgo.It("should mark an Init container as failed", func() {
 		framework.ExpectNoError(results.Exits(init1))
@@ -1381,7 +1385,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "

 		podSpec, err := client.Get(context.TODO(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results = parseOutput(podSpec)
+		results = parseOutput(context.TODO(), f, podSpec)
 	})
 	ginkgo.It("should mark an Init container as failed", func() {
 		framework.ExpectNoError(results.Exits(init1))
@@ -1443,7 +1447,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "

 		podSpec, err := client.Get(context.TODO(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results = parseOutput(podSpec)
+		results = parseOutput(context.TODO(), f, podSpec)
 	})
 	ginkgo.It("should not restart a restartable init container", func() {
 		framework.ExpectNoError(results.DoesntStartAfter(restartableInit1, regular1))
@@ -1501,7 +1505,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "

 		podSpec, err = client.Get(context.Background(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results = parseOutput(podSpec)
+		results = parseOutput(context.TODO(), f, podSpec)
 	})
 	ginkgo.It("should not start a restartable init container", func() {
 		framework.ExpectNoError(results.DoesntStart(restartableInit1))
@@ -1570,7 +1574,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "

 		podSpec, err := client.Get(context.TODO(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results = parseOutput(podSpec)
+		results = parseOutput(context.TODO(), f, podSpec)
 	})
 	ginkgo.It("should restart a restartable init container before the regular container started", func() {
 		framework.ExpectNoError(results.StartsBefore(restartableInit1, regular1))
@@ -1640,7 +1644,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "

 		podSpec, err := client.Get(context.TODO(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results = parseOutput(podSpec)
+		results = parseOutput(context.TODO(), f, podSpec)
 	})
 	ginkgo.It("should restart a restartable init container before the regular container started", func() {
 		framework.ExpectNoError(results.StartsBefore(restartableInit1, regular1))
@@ -1718,7 +1722,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "

 		podSpec, err := client.Get(context.TODO(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results = parseOutput(podSpec)
+		results = parseOutput(context.TODO(), f, podSpec)
 	})
 	ginkgo.It("should have Init container restartCount greater than 0", func() {
 		framework.ExpectNoError(results.HasRestarted(init1))
@@ -1793,7 +1797,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "

 		podSpec, err := client.Get(context.TODO(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results = parseOutput(podSpec)
+		results = parseOutput(context.TODO(), f, podSpec)
 	})
 	ginkgo.It("should have Init container restartCount greater than 0", func() {
 		framework.ExpectNoError(results.HasRestarted(init1))
@@ -1855,7 +1859,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "

 		podSpec, err := client.Get(context.TODO(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results = parseOutput(podSpec)
+		results = parseOutput(context.TODO(), f, podSpec)
 	})

 	ginkgo.It("should not restart a restartable init container", func() {
@@ -1915,7 +1919,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "

 		podSpec, err = client.Get(context.Background(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results = parseOutput(podSpec)
+		results = parseOutput(context.TODO(), f, podSpec)
 	})
 	ginkgo.It("should not start a restartable init container", func() {
 		framework.ExpectNoError(results.DoesntStart(restartableInit1))
@@ -1983,7 +1987,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "

 		podSpec, err := client.Get(context.TODO(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results = parseOutput(podSpec)
+		results = parseOutput(context.TODO(), f, podSpec)
 	})
 	ginkgo.It("should restart a restartable init container before the regular container started", func() {
 		framework.ExpectNoError(results.StartsBefore(restartableInit1, regular1))
@@ -2053,7 +2057,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "

 		podSpec, err := client.Get(context.TODO(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results = parseOutput(podSpec)
+		results = parseOutput(context.TODO(), f, podSpec)
 	})
 	ginkgo.It("should restart a restartable init container before the regular container started", func() {
 		framework.ExpectNoError(results.StartsBefore(restartableInit1, regular1))
@@ -2131,7 +2135,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "

 		podSpec, err := client.Get(context.TODO(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results = parseOutput(podSpec)
+		results = parseOutput(context.TODO(), f, podSpec)
 	})
 	ginkgo.It("should have Init container restartCount greater than 0", func() {
 		framework.ExpectNoError(results.HasRestarted(init1))
@@ -2206,7 +2210,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "

 		podSpec, err := client.Get(context.TODO(), podSpec.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results = parseOutput(podSpec)
+		results = parseOutput(context.TODO(), f, podSpec)
 	})
 	ginkgo.It("should have Init container restartCount greater than 0", func() {
 		framework.ExpectNoError(results.HasRestarted(init1))
@@ -2290,7 +2294,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "

 		pod, err = client.Get(context.TODO(), pod.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		results := parseOutput(pod)
+		results := parseOutput(context.TODO(), f, pod)

 		ginkgo.By("Analyzing results")
 		framework.ExpectNoError(results.StartsBefore(restartableInit1, restartableInit2))
@@ -2334,8 +2338,9 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
 					PreStop: &v1.LifecycleHandler{
 						Exec: &v1.ExecAction{
 							Command: ExecCommand(prefixedName(PreStopPrefix, restartableInit1), execCommand{
-								Delay:    1,
-								ExitCode: 0,
+								Delay:         1,
+								ExitCode:      0,
+								ContainerName: restartableInit1,
 							}),
 						},
 					},
@@ -2372,7 +2377,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
 			framework.Failf("pod %q is not pending, it's %q", pod.Name, pod.Status.Phase)
 		}

-		results := parseOutput(pod)
+		results := parseOutput(context.TODO(), f, pod)

 		ginkgo.By("Analyzing results")
 		framework.ExpectNoError(results.RunTogether(restartableInit1, prefixedName(PreStopPrefix, restartableInit1)))
@@ -2418,8 +2423,9 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
 					PreStop: &v1.LifecycleHandler{
 						Exec: &v1.ExecAction{
 							Command: ExecCommand(prefixedName(PreStopPrefix, restartableInit1), execCommand{
-								Delay:    1,
-								ExitCode: 0,
+								Delay:         1,
+								ExitCode:      0,
+								ContainerName: restartableInit1,
 							}),
 						},
 					},
@@ -2455,7 +2461,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
 		pod, err = client.Get(context.TODO(), pod.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)

-		results := parseOutput(pod)
+		results := parseOutput(context.TODO(), f, pod)

 		ginkgo.By("Analyzing results")
 		framework.ExpectNoError(results.RunTogether(restartableInit1, prefixedName(PreStopPrefix, restartableInit1)))