Allow clients to request most recent container logs

Many users attempt to use 'kubectl logs' in order to find the logs
for a container, but receive no logs or an error telling them their
container is not running. The fix in this case is to run with '--previous',
but this does not match user expectations for the logs command.

This commit changes the behavior of the Kubelet to return the logs of
the currently running container or the previous running container unless
the user provides the "previous" flag. If the user specifies "follow"
the logs of the most recent container will be displayed, and if it is
a terminated container the logs will come to an end (the user can
repeatedly invoke 'kubectl logs --follow' and see the same output).

Clean up error messages in the kubelet log path to be consistent and
give users a more predictable experience.

Have the Kubelet return 400 on invalid requests
This commit is contained in:
Clayton Coleman 2016-01-28 22:27:56 -05:00
parent 8448b60f51
commit d6d4a17db6
3 changed files with 89 additions and 62 deletions

View File

@ -2449,32 +2449,51 @@ func (kl *Kubelet) LatestLoopEntryTime() time.Time {
return val.(time.Time) return val.(time.Time)
} }
func (kl *Kubelet) validatePodPhase(podStatus *api.PodStatus) error { // validateContainerLogStatus returns the container ID for the desired container to retrieve logs for, based on the state
switch podStatus.Phase { // of the container. The previous flag will only return the logs for the the last terminated container, otherwise, the current
case api.PodRunning, api.PodSucceeded, api.PodFailed: // running container is preferred over a previous termination. If info about the container is not available then a specific
return nil // error is returned to the end user.
} func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *api.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) {
return fmt.Errorf("pod is not in 'Running', 'Succeeded' or 'Failed' state - State: %q", podStatus.Phase)
}
func (kl *Kubelet) validateContainerStatus(podStatus *api.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) {
var cID string var cID string
cStatus, found := api.GetContainerStatus(podStatus.ContainerStatuses, containerName) cStatus, found := api.GetContainerStatus(podStatus.ContainerStatuses, containerName)
if !found { if !found {
return kubecontainer.ContainerID{}, fmt.Errorf("container %q not found", containerName) return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is not available", containerName, podName)
} }
if previous { lastState := cStatus.LastTerminationState
if cStatus.LastTerminationState.Terminated == nil { waiting, running, terminated := cStatus.State.Waiting, cStatus.State.Running, cStatus.State.Terminated
return kubecontainer.ContainerID{}, fmt.Errorf("previous terminated container %q not found", containerName)
} switch {
cID = cStatus.LastTerminationState.Terminated.ContainerID case previous:
} else { if lastState.Terminated == nil {
if cStatus.State.Waiting != nil { return kubecontainer.ContainerID{}, fmt.Errorf("previous terminated container %q in pod %q not found", containerName, podName)
return kubecontainer.ContainerID{}, fmt.Errorf("container %q is in waiting state.", containerName)
} }
cID = lastState.Terminated.ContainerID
case running != nil:
cID = cStatus.ContainerID cID = cStatus.ContainerID
case terminated != nil:
cID = terminated.ContainerID
case lastState.Terminated != nil:
cID = lastState.Terminated.ContainerID
case waiting != nil:
// output some info for the most common pending failures
switch reason := waiting.Reason; reason {
case kubecontainer.ErrImagePull.Error():
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: image can't be pulled", containerName, podName)
case kubecontainer.ErrImagePullBackOff.Error():
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: trying and failing to pull image", containerName, podName)
default:
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: %v", containerName, podName, reason)
}
default:
// unrecognized state
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start - no logs yet", containerName, podName)
} }
return kubecontainer.ParseContainerID(cID), nil return kubecontainer.ParseContainerID(cID), nil
} }
@ -2493,7 +2512,7 @@ func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName string, lo
pod, ok := kl.GetPodByName(namespace, name) pod, ok := kl.GetPodByName(namespace, name)
if !ok { if !ok {
return fmt.Errorf("unable to get logs for container %q in pod %q namespace %q: unable to find pod", containerName, name, namespace) return fmt.Errorf("pod %q cannot be found - no logs available", name)
} }
podUID := pod.UID podUID := pod.UID
@ -2508,15 +2527,9 @@ func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName string, lo
podStatus = pod.Status podStatus = pod.Status
} }
if err := kl.validatePodPhase(&podStatus); err != nil { containerID, err := kl.validateContainerLogStatus(pod.Name, &podStatus, containerName, logOptions.Previous)
// No log is available if pod is not in a "known" phase (e.g. Unknown).
return fmt.Errorf("Pod %q in namespace %q : %v", name, namespace, err)
}
containerID, err := kl.validateContainerStatus(&podStatus, containerName, logOptions.Previous)
if err != nil { if err != nil {
// No log is available if the container status is missing or is in the return err
// waiting state.
return fmt.Errorf("Pod %q in namespace %q: %v", name, namespace, err)
} }
return kl.containerRuntime.GetContainerLogs(pod, containerID, logOptions, stdout, stderr) return kl.containerRuntime.GetContainerLogs(pod, containerID, logOptions, stdout, stderr)
} }

View File

@ -2398,33 +2398,7 @@ func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
} }
} }
func TestValidatePodStatus(t *testing.T) { func TestValidateContainerLogStatus(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
testCases := []struct {
podPhase api.PodPhase
success bool
}{
{api.PodRunning, true},
{api.PodSucceeded, true},
{api.PodFailed, true},
{api.PodPending, false},
{api.PodUnknown, false},
}
for i, tc := range testCases {
err := kubelet.validatePodPhase(&api.PodStatus{Phase: tc.podPhase})
if tc.success {
if err != nil {
t.Errorf("[case %d]: unexpected failure - %v", i, err)
}
} else if err == nil {
t.Errorf("[case %d]: unexpected success", i)
}
}
}
func TestValidateContainerStatus(t *testing.T) {
testKubelet := newTestKubelet(t) testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet kubelet := testKubelet.kubelet
containerName := "x" containerName := "x"
@ -2446,6 +2420,17 @@ func TestValidateContainerStatus(t *testing.T) {
}, },
success: true, success: true,
}, },
{
statuses: []api.ContainerStatus{
{
Name: containerName,
State: api.ContainerState{
Running: &api.ContainerStateRunning{},
},
},
},
success: true,
},
{ {
statuses: []api.ContainerStatus{ statuses: []api.ContainerStatus{
{ {
@ -2468,10 +2453,28 @@ func TestValidateContainerStatus(t *testing.T) {
}, },
success: false, success: false,
}, },
{
statuses: []api.ContainerStatus{
{
Name: containerName,
State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ErrImagePull"}},
},
},
success: false,
},
{
statuses: []api.ContainerStatus{
{
Name: containerName,
State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ErrImagePullBackOff"}},
},
},
success: false,
},
} }
for i, tc := range testCases { for i, tc := range testCases {
_, err := kubelet.validateContainerStatus(&api.PodStatus{ _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
ContainerStatuses: tc.statuses, ContainerStatuses: tc.statuses,
}, containerName, false) }, containerName, false)
if tc.success { if tc.success {
@ -2482,21 +2485,31 @@ func TestValidateContainerStatus(t *testing.T) {
t.Errorf("[case %d]: unexpected success", i) t.Errorf("[case %d]: unexpected success", i)
} }
} }
if _, err := kubelet.validateContainerStatus(&api.PodStatus{ if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
ContainerStatuses: testCases[0].statuses, ContainerStatuses: testCases[0].statuses,
}, "blah", false); err == nil { }, "blah", false); err == nil {
t.Errorf("expected error with invalid container name") t.Errorf("expected error with invalid container name")
} }
if _, err := kubelet.validateContainerStatus(&api.PodStatus{ if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
ContainerStatuses: testCases[0].statuses, ContainerStatuses: testCases[0].statuses,
}, containerName, true); err != nil { }, containerName, true); err != nil {
t.Errorf("unexpected error with for previous terminated container - %v", err) t.Errorf("unexpected error with for previous terminated container - %v", err)
} }
if _, err := kubelet.validateContainerStatus(&api.PodStatus{ if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
ContainerStatuses: testCases[0].statuses,
}, containerName, false); err != nil {
t.Errorf("unexpected error with for most recent container - %v", err)
}
if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
ContainerStatuses: testCases[1].statuses, ContainerStatuses: testCases[1].statuses,
}, containerName, true); err == nil { }, containerName, true); err == nil {
t.Errorf("expected error with for previous terminated container") t.Errorf("expected error with for previous terminated container")
} }
if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
ContainerStatuses: testCases[1].statuses,
}, containerName, false); err != nil {
t.Errorf("unexpected error with for most recent container")
}
} }
// updateDiskSpacePolicy creates a new DiskSpaceManager with a new policy. This new manager along // updateDiskSpacePolicy creates a new DiskSpaceManager with a new policy. This new manager along

View File

@ -24,6 +24,7 @@ import (
"net" "net"
"net/http" "net/http"
"net/http/pprof" "net/http/pprof"
"reflect"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
@ -416,7 +417,7 @@ func (s *Server) getContainerLogs(request *restful.Request, response *restful.Re
pod, ok := s.host.GetPodByName(podNamespace, podID) pod, ok := s.host.GetPodByName(podNamespace, podID)
if !ok { if !ok {
response.WriteError(http.StatusNotFound, fmt.Errorf("Pod %q does not exist", podID)) response.WriteError(http.StatusNotFound, fmt.Errorf("pod %q does not exist\n", podID))
return return
} }
// Check if containerName is valid. // Check if containerName is valid.
@ -427,12 +428,12 @@ func (s *Server) getContainerLogs(request *restful.Request, response *restful.Re
} }
} }
if !containerExists { if !containerExists {
response.WriteError(http.StatusNotFound, fmt.Errorf("Container %q not found in Pod %q", containerName, podID)) response.WriteError(http.StatusNotFound, fmt.Errorf("container %q not found in pod %q\n", containerName, podID))
return return
} }
if _, ok := response.ResponseWriter.(http.Flusher); !ok { if _, ok := response.ResponseWriter.(http.Flusher); !ok {
response.WriteError(http.StatusInternalServerError, fmt.Errorf("unable to convert %v into http.Flusher", response)) response.WriteError(http.StatusInternalServerError, fmt.Errorf("unable to convert %v into http.Flusher, cannot show logs\n", reflect.TypeOf(response)))
return return
} }
fw := flushwriter.Wrap(response.ResponseWriter) fw := flushwriter.Wrap(response.ResponseWriter)
@ -442,7 +443,7 @@ func (s *Server) getContainerLogs(request *restful.Request, response *restful.Re
response.Header().Set("Transfer-Encoding", "chunked") response.Header().Set("Transfer-Encoding", "chunked")
if err := s.host.GetKubeletContainerLogs(kubecontainer.GetPodFullName(pod), containerName, logOptions, fw, fw); err != nil { if err := s.host.GetKubeletContainerLogs(kubecontainer.GetPodFullName(pod), containerName, logOptions, fw, fw); err != nil {
if err != limitwriter.ErrMaximumWrite { if err != limitwriter.ErrMaximumWrite {
response.WriteError(http.StatusInternalServerError, err) response.WriteError(http.StatusBadRequest, err)
} }
return return
} }