Allow clients to request most recent container logs

Many users attempt to use 'kubectl logs' to find the logs
for a container, but receive no logs or an error telling them their
container is not running. The fix in this case is to run with '--previous',
but this does not match user expectations for the logs command.
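
For example, when a container is crash-looping, the current container is
usually waiting to restart, so a plain 'kubectl logs <pod>' returns an error
instead of the output of the last attempt, and only
'kubectl logs --previous <pod>' reveals why the container is failing.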

This commit changes the behavior of the Kubelet to return the logs of
the currently running container or the previous running container unless
the user provides the "previous" flag. If the user specifies "follow",
the logs of the most recent container will be displayed, and if that
container has terminated the logs will come to an end (the user can
repeatedly invoke 'kubectl logs --follow' and see the same output).
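
In other words, the kubelet now picks the container to read logs from in a
fixed order of preference. A condensed sketch of the selection logic in the
patch below (the helper name and standalone packaging are illustrative, not
part of the patch):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
)

// chooseLogContainerID mirrors validateContainerLogStatus below: an explicit
// "previous" request wins, then a running container, then a terminated one,
// then the record of the last termination; a container that is still waiting
// to start has no logs to return.
func chooseLogContainerID(status api.ContainerStatus, previous bool) (string, error) {
	last := status.LastTerminationState
	switch {
	case previous:
		if last.Terminated == nil {
			return "", fmt.Errorf("previous terminated container not found")
		}
		return last.Terminated.ContainerID, nil
	case status.State.Running != nil:
		return status.ContainerID, nil
	case status.State.Terminated != nil:
		return status.State.Terminated.ContainerID, nil
	case last.Terminated != nil:
		return last.Terminated.ContainerID, nil
	case status.State.Waiting != nil:
		return "", fmt.Errorf("container is waiting to start: %s", status.State.Waiting.Reason)
	default:
		return "", fmt.Errorf("container has not started - no logs yet")
	}
}

func main() {
	// A running container is preferred over any previous termination.
	status := api.ContainerStatus{
		Name:        "x",
		State:       api.ContainerState{Running: &api.ContainerStateRunning{}},
		ContainerID: "docker://abc123",
	}
	id, err := chooseLogContainerID(status, false)
	fmt.Println(id, err) // docker://abc123 <nil>
}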

Clean up error messages in the kubelet log path to be consistent and
give users a more predictable experience.

Have the Kubelet return 400 on invalid requests
Clayton Coleman 2016-01-28 22:27:56 -05:00
parent 8448b60f51
commit d6d4a17db6
3 changed files with 89 additions and 62 deletions

pkg/kubelet/kubelet.go

@@ -2449,32 +2449,51 @@ func (kl *Kubelet) LatestLoopEntryTime() time.Time {
return val.(time.Time)
}
func (kl *Kubelet) validatePodPhase(podStatus *api.PodStatus) error {
switch podStatus.Phase {
case api.PodRunning, api.PodSucceeded, api.PodFailed:
return nil
}
return fmt.Errorf("pod is not in 'Running', 'Succeeded' or 'Failed' state - State: %q", podStatus.Phase)
}
func (kl *Kubelet) validateContainerStatus(podStatus *api.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) {
// validateContainerLogStatus returns the container ID for the desired container to retrieve logs for, based on the state
// of the container. The previous flag will only return the logs for the last terminated container; otherwise, the current
// running container is preferred over a previous termination. If info about the container is not available then a specific
// error is returned to the end user.
func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *api.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) {
var cID string
cStatus, found := api.GetContainerStatus(podStatus.ContainerStatuses, containerName)
if !found {
return kubecontainer.ContainerID{}, fmt.Errorf("container %q not found", containerName)
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is not available", containerName, podName)
}
if previous {
if cStatus.LastTerminationState.Terminated == nil {
return kubecontainer.ContainerID{}, fmt.Errorf("previous terminated container %q not found", containerName)
}
cID = cStatus.LastTerminationState.Terminated.ContainerID
} else {
if cStatus.State.Waiting != nil {
return kubecontainer.ContainerID{}, fmt.Errorf("container %q is in waiting state.", containerName)
lastState := cStatus.LastTerminationState
waiting, running, terminated := cStatus.State.Waiting, cStatus.State.Running, cStatus.State.Terminated
switch {
case previous:
if lastState.Terminated == nil {
return kubecontainer.ContainerID{}, fmt.Errorf("previous terminated container %q in pod %q not found", containerName, podName)
}
cID = lastState.Terminated.ContainerID
case running != nil:
cID = cStatus.ContainerID
case terminated != nil:
cID = terminated.ContainerID
case lastState.Terminated != nil:
cID = lastState.Terminated.ContainerID
case waiting != nil:
// output some info for the most common pending failures
switch reason := waiting.Reason; reason {
case kubecontainer.ErrImagePull.Error():
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: image can't be pulled", containerName, podName)
case kubecontainer.ErrImagePullBackOff.Error():
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: trying and failing to pull image", containerName, podName)
default:
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: %v", containerName, podName, reason)
}
default:
// unrecognized state
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start - no logs yet", containerName, podName)
}
return kubecontainer.ParseContainerID(cID), nil
}
@@ -2493,7 +2512,7 @@ func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName string, lo
pod, ok := kl.GetPodByName(namespace, name)
if !ok {
return fmt.Errorf("unable to get logs for container %q in pod %q namespace %q: unable to find pod", containerName, name, namespace)
return fmt.Errorf("pod %q cannot be found - no logs available", name)
}
podUID := pod.UID
@@ -2508,15 +2527,9 @@ func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName string, lo
podStatus = pod.Status
}
if err := kl.validatePodPhase(&podStatus); err != nil {
// No log is available if pod is not in a "known" phase (e.g. Unknown).
return fmt.Errorf("Pod %q in namespace %q : %v", name, namespace, err)
}
containerID, err := kl.validateContainerStatus(&podStatus, containerName, logOptions.Previous)
containerID, err := kl.validateContainerLogStatus(pod.Name, &podStatus, containerName, logOptions.Previous)
if err != nil {
// No log is available if the container status is missing or is in the
// waiting state.
return fmt.Errorf("Pod %q in namespace %q: %v", name, namespace, err)
return err
}
return kl.containerRuntime.GetContainerLogs(pod, containerID, logOptions, stdout, stderr)
}

pkg/kubelet/kubelet_test.go

@@ -2398,33 +2398,7 @@ func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
}
}
func TestValidatePodStatus(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
testCases := []struct {
podPhase api.PodPhase
success bool
}{
{api.PodRunning, true},
{api.PodSucceeded, true},
{api.PodFailed, true},
{api.PodPending, false},
{api.PodUnknown, false},
}
for i, tc := range testCases {
err := kubelet.validatePodPhase(&api.PodStatus{Phase: tc.podPhase})
if tc.success {
if err != nil {
t.Errorf("[case %d]: unexpected failure - %v", i, err)
}
} else if err == nil {
t.Errorf("[case %d]: unexpected success", i)
}
}
}
func TestValidateContainerStatus(t *testing.T) {
func TestValidateContainerLogStatus(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
containerName := "x"
@@ -2446,6 +2420,17 @@ func TestValidateContainerStatus(t *testing.T) {
},
success: true,
},
{
statuses: []api.ContainerStatus{
{
Name: containerName,
State: api.ContainerState{
Running: &api.ContainerStateRunning{},
},
},
},
success: true,
},
{
statuses: []api.ContainerStatus{
{
@@ -2468,10 +2453,28 @@ func TestValidateContainerStatus(t *testing.T) {
},
success: false,
},
{
statuses: []api.ContainerStatus{
{
Name: containerName,
State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ErrImagePull"}},
},
},
success: false,
},
{
statuses: []api.ContainerStatus{
{
Name: containerName,
State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ErrImagePullBackOff"}},
},
},
success: false,
},
}
for i, tc := range testCases {
_, err := kubelet.validateContainerStatus(&api.PodStatus{
_, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
ContainerStatuses: tc.statuses,
}, containerName, false)
if tc.success {
@@ -2482,21 +2485,31 @@ func TestValidateContainerStatus(t *testing.T) {
t.Errorf("[case %d]: unexpected success", i)
}
}
if _, err := kubelet.validateContainerStatus(&api.PodStatus{
if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
ContainerStatuses: testCases[0].statuses,
}, "blah", false); err == nil {
t.Errorf("expected error with invalid container name")
}
if _, err := kubelet.validateContainerStatus(&api.PodStatus{
if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
ContainerStatuses: testCases[0].statuses,
}, containerName, true); err != nil {
t.Errorf("unexpected error with for previous terminated container - %v", err)
}
if _, err := kubelet.validateContainerStatus(&api.PodStatus{
if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
ContainerStatuses: testCases[0].statuses,
}, containerName, false); err != nil {
t.Errorf("unexpected error with for most recent container - %v", err)
}
if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
ContainerStatuses: testCases[1].statuses,
}, containerName, true); err == nil {
t.Errorf("expected error with for previous terminated container")
}
if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
ContainerStatuses: testCases[1].statuses,
}, containerName, false); err != nil {
t.Errorf("unexpected error with for most recent container")
}
}
// updateDiskSpacePolicy creates a new DiskSpaceManager with a new policy. This new manager along

pkg/kubelet/server/server.go

@@ -24,6 +24,7 @@ import (
"net"
"net/http"
"net/http/pprof"
"reflect"
"strconv"
"strings"
"sync"
@@ -416,7 +417,7 @@ func (s *Server) getContainerLogs(request *restful.Request, response *restful.Re
pod, ok := s.host.GetPodByName(podNamespace, podID)
if !ok {
response.WriteError(http.StatusNotFound, fmt.Errorf("Pod %q does not exist", podID))
response.WriteError(http.StatusNotFound, fmt.Errorf("pod %q does not exist\n", podID))
return
}
// Check if containerName is valid.
@@ -427,12 +428,12 @@ func (s *Server) getContainerLogs(request *restful.Request, response *restful.Re
}
}
if !containerExists {
response.WriteError(http.StatusNotFound, fmt.Errorf("Container %q not found in Pod %q", containerName, podID))
response.WriteError(http.StatusNotFound, fmt.Errorf("container %q not found in pod %q\n", containerName, podID))
return
}
if _, ok := response.ResponseWriter.(http.Flusher); !ok {
response.WriteError(http.StatusInternalServerError, fmt.Errorf("unable to convert %v into http.Flusher", response))
response.WriteError(http.StatusInternalServerError, fmt.Errorf("unable to convert %v into http.Flusher, cannot show logs\n", reflect.TypeOf(response)))
return
}
fw := flushwriter.Wrap(response.ResponseWriter)
@@ -442,7 +443,7 @@ func (s *Server) getContainerLogs(request *restful.Request, response *restful.Re
response.Header().Set("Transfer-Encoding", "chunked")
if err := s.host.GetKubeletContainerLogs(kubecontainer.GetPodFullName(pod), containerName, logOptions, fw, fw); err != nil {
if err != limitwriter.ErrMaximumWrite {
response.WriteError(http.StatusInternalServerError, err)
response.WriteError(http.StatusBadRequest, err)
}
return
}
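
Because GetKubeletContainerLogs errors are now written with
http.StatusBadRequest, a client can distinguish a bad log request from a
kubelet-side failure. A rough probe of the endpoint served by
getContainerLogs above (the node address, port, and plain-HTTP access here
are assumptions; real clusters typically front this with auth/TLS):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// /containerLogs/{podNamespace}/{podID}/{containerName} is the route
	// handled by getContainerLogs; the host and port are assumptions.
	url := "http://127.0.0.1:10250/containerLogs/default/mypod/mycontainer?previous=true"
	resp, err := http.Get(url)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	switch resp.StatusCode {
	case http.StatusOK:
		fmt.Print(string(body)) // the (possibly chunked) log stream
	case http.StatusBadRequest:
		// e.g. "previous terminated container ... not found" after this patch
		fmt.Println("invalid log request:", string(body))
	case http.StatusNotFound:
		fmt.Println("pod or container not found:", string(body))
	default:
		fmt.Printf("unexpected status %d: %s\n", resp.StatusCode, body)
	}
}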