mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-28 22:17:14 +00:00
Merge pull request #20192 from Random-Liu/remove-get-api-podstatus
Auto commit by PR queue bot
This commit is contained in:
commit
bd67b8a5db
@ -236,15 +236,6 @@ func (f *FakeRuntime) KillContainerInPod(container api.Container, pod *api.Pod)
|
|||||||
return f.Err
|
return f.Err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FakeRuntime) GetAPIPodStatus(*api.Pod) (*api.PodStatus, error) {
|
|
||||||
f.Lock()
|
|
||||||
defer f.Unlock()
|
|
||||||
|
|
||||||
f.CalledFunctions = append(f.CalledFunctions, "GetAPIPodStatus")
|
|
||||||
status := f.APIPodStatus
|
|
||||||
return &status, f.Err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *FakeRuntime) GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error) {
|
func (f *FakeRuntime) GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error) {
|
||||||
f.Lock()
|
f.Lock()
|
||||||
defer f.Unlock()
|
defer f.Unlock()
|
||||||
|
@ -69,11 +69,6 @@ type Runtime interface {
|
|||||||
// KillPod kills all the containers of a pod. Pod may be nil, running pod must not be.
|
// KillPod kills all the containers of a pod. Pod may be nil, running pod must not be.
|
||||||
// TODO(random-liu): Return PodSyncResult in KillPod.
|
// TODO(random-liu): Return PodSyncResult in KillPod.
|
||||||
KillPod(pod *api.Pod, runningPod Pod) error
|
KillPod(pod *api.Pod, runningPod Pod) error
|
||||||
// GetAPIPodStatus retrieves the api.PodStatus of the pod, including the information of
|
|
||||||
// all containers in the pod. Clients of this interface assume the
|
|
||||||
// containers' statuses in a pod always have a deterministic ordering
|
|
||||||
// (e.g., sorted by name).
|
|
||||||
GetAPIPodStatus(*api.Pod) (*api.PodStatus, error)
|
|
||||||
// GetPodStatus retrieves the status of the pod, including the
|
// GetPodStatus retrieves the status of the pod, including the
|
||||||
// information of all containers in the pod that are visble in Runtime.
|
// information of all containers in the pod that are visble in Runtime.
|
||||||
GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error)
|
GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error)
|
||||||
|
@ -77,11 +77,6 @@ func (r *Mock) KillContainerInPod(container api.Container, pod *api.Pod) error {
|
|||||||
return args.Error(0)
|
return args.Error(0)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Mock) GetAPIPodStatus(pod *api.Pod) (*api.PodStatus, error) {
|
|
||||||
args := r.Called(pod)
|
|
||||||
return args.Get(0).(*api.PodStatus), args.Error(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Mock) GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error) {
|
func (r *Mock) GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error) {
|
||||||
args := r.Called(uid, name, namespace)
|
args := r.Called(uid, name, namespace)
|
||||||
return args.Get(0).(*PodStatus), args.Error(1)
|
return args.Get(0).(*PodStatus), args.Error(1)
|
||||||
|
@ -247,7 +247,7 @@ func (f *FakeDockerClient) CreateContainer(c docker.CreateContainerOptions) (*do
|
|||||||
// Docker likes to add a '/', so copy that behavior.
|
// Docker likes to add a '/', so copy that behavior.
|
||||||
name := "/" + c.Name
|
name := "/" + c.Name
|
||||||
f.Created = append(f.Created, name)
|
f.Created = append(f.Created, name)
|
||||||
// The newest container should be in front, because we assume so in GetAPIPodStatus()
|
// The newest container should be in front, because we assume so in GetPodStatus()
|
||||||
f.ContainerList = append([]docker.APIContainers{
|
f.ContainerList = append([]docker.APIContainers{
|
||||||
{ID: name, Names: []string{name}, Image: c.Config.Image, Labels: c.Config.Labels},
|
{ID: name, Names: []string{name}, Image: c.Config.Image, Labels: c.Config.Labels},
|
||||||
}, f.ContainerList...)
|
}, f.ContainerList...)
|
||||||
@ -300,7 +300,7 @@ func (f *FakeDockerClient) StopContainer(id string, timeout uint) error {
|
|||||||
var newList []docker.APIContainers
|
var newList []docker.APIContainers
|
||||||
for _, container := range f.ContainerList {
|
for _, container := range f.ContainerList {
|
||||||
if container.ID == id {
|
if container.ID == id {
|
||||||
// The newest exited container should be in front. Because we assume so in GetAPIPodStatus()
|
// The newest exited container should be in front. Because we assume so in GetPodStatus()
|
||||||
f.ExitedContainerList = append([]docker.APIContainers{container}, f.ExitedContainerList...)
|
f.ExitedContainerList = append([]docker.APIContainers{container}, f.ExitedContainerList...)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
@ -109,8 +109,7 @@ type DockerManager struct {
|
|||||||
// means that some entries may be recycled before a pod has been
|
// means that some entries may be recycled before a pod has been
|
||||||
// deleted.
|
// deleted.
|
||||||
reasonCache reasonInfoCache
|
reasonCache reasonInfoCache
|
||||||
// TODO(yifan): Record the pull failure so we can eliminate the image checking
|
// TODO(yifan): Record the pull failure so we can eliminate the image checking?
|
||||||
// in GetAPIPodStatus()?
|
|
||||||
// Lower level docker image puller.
|
// Lower level docker image puller.
|
||||||
dockerPuller DockerPuller
|
dockerPuller DockerPuller
|
||||||
|
|
||||||
@ -411,17 +410,6 @@ func (dm *DockerManager) inspectContainer(id string, podName, podNamespace strin
|
|||||||
return &status, "", nil
|
return &status, "", nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetAPIPodStatus returns docker related status for all containers in the pod
|
|
||||||
// spec.
|
|
||||||
func (dm *DockerManager) GetAPIPodStatus(pod *api.Pod) (*api.PodStatus, error) {
|
|
||||||
// Get the pod status.
|
|
||||||
podStatus, err := dm.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return dm.ConvertPodStatusToAPIPodStatus(pod, podStatus)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dm *DockerManager) ConvertPodStatusToAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) (*api.PodStatus, error) {
|
func (dm *DockerManager) ConvertPodStatusToAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) (*api.PodStatus, error) {
|
||||||
var apiPodStatus api.PodStatus
|
var apiPodStatus api.PodStatus
|
||||||
uid := pod.UID
|
uid := pod.UID
|
||||||
|
@ -31,10 +31,8 @@ import (
|
|||||||
|
|
||||||
docker "github.com/fsouza/go-dockerclient"
|
docker "github.com/fsouza/go-dockerclient"
|
||||||
cadvisorapi "github.com/google/cadvisor/info/v1"
|
cadvisorapi "github.com/google/cadvisor/info/v1"
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"k8s.io/kubernetes/pkg/api"
|
"k8s.io/kubernetes/pkg/api"
|
||||||
"k8s.io/kubernetes/pkg/api/testapi"
|
"k8s.io/kubernetes/pkg/api/testapi"
|
||||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
|
||||||
"k8s.io/kubernetes/pkg/client/record"
|
"k8s.io/kubernetes/pkg/client/record"
|
||||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||||
"k8s.io/kubernetes/pkg/kubelet/network"
|
"k8s.io/kubernetes/pkg/kubelet/network"
|
||||||
@ -562,7 +560,6 @@ func runSyncPod(t *testing.T, dm *DockerManager, fakeDocker *FakeDockerClient, p
|
|||||||
if backOff == nil {
|
if backOff == nil {
|
||||||
backOff = util.NewBackOff(time.Second, time.Minute)
|
backOff = util.NewBackOff(time.Second, time.Minute)
|
||||||
}
|
}
|
||||||
//TODO(random-liu): Add test for PodSyncResult
|
|
||||||
result := dm.SyncPod(pod, *apiPodStatus, podStatus, []api.Secret{}, backOff)
|
result := dm.SyncPod(pod, *apiPodStatus, podStatus, []api.Secret{}, backOff)
|
||||||
err = result.Error()
|
err = result.Error()
|
||||||
if err != nil && !expectErr {
|
if err != nil && !expectErr {
|
||||||
@ -895,65 +892,6 @@ func TestSyncPodsDoesNothing(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSyncPodWithPullPolicy(t *testing.T) {
|
|
||||||
dm, fakeDocker := newTestDockerManager()
|
|
||||||
puller := dm.dockerPuller.(*FakeDockerPuller)
|
|
||||||
puller.HasImages = []string{"existing_one", "want:latest"}
|
|
||||||
dm.podInfraContainerImage = "pod_infra_image"
|
|
||||||
|
|
||||||
pod := &api.Pod{
|
|
||||||
ObjectMeta: api.ObjectMeta{
|
|
||||||
UID: "12345678",
|
|
||||||
Name: "foo",
|
|
||||||
Namespace: "new",
|
|
||||||
},
|
|
||||||
Spec: api.PodSpec{
|
|
||||||
Containers: []api.Container{
|
|
||||||
{Name: "bar", Image: "pull_always_image", ImagePullPolicy: api.PullAlways},
|
|
||||||
{Name: "bar2", Image: "pull_if_not_present_image", ImagePullPolicy: api.PullIfNotPresent},
|
|
||||||
{Name: "bar3", Image: "existing_one", ImagePullPolicy: api.PullIfNotPresent},
|
|
||||||
{Name: "bar4", Image: "want:latest", ImagePullPolicy: api.PullIfNotPresent},
|
|
||||||
{Name: "bar5", Image: "pull_never_image", ImagePullPolicy: api.PullNever},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedStatusMap := map[string]api.ContainerState{
|
|
||||||
"bar": {Running: &api.ContainerStateRunning{unversioned.Now()}},
|
|
||||||
"bar2": {Running: &api.ContainerStateRunning{unversioned.Now()}},
|
|
||||||
"bar3": {Running: &api.ContainerStateRunning{unversioned.Now()}},
|
|
||||||
"bar4": {Running: &api.ContainerStateRunning{unversioned.Now()}},
|
|
||||||
"bar5": {Waiting: &api.ContainerStateWaiting{Reason: kubecontainer.ErrImageNeverPull.Error(),
|
|
||||||
Message: "Container image \"pull_never_image\" is not present with pull policy of Never"}},
|
|
||||||
}
|
|
||||||
|
|
||||||
runSyncPod(t, dm, fakeDocker, pod, nil, true)
|
|
||||||
statuses, err := dm.GetAPIPodStatus(pod)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("unable to get pod status")
|
|
||||||
}
|
|
||||||
for _, c := range pod.Spec.Containers {
|
|
||||||
if containerStatus, ok := api.GetContainerStatus(statuses.ContainerStatuses, c.Name); ok {
|
|
||||||
// copy the StartedAt time, to make the structs match
|
|
||||||
if containerStatus.State.Running != nil && expectedStatusMap[c.Name].Running != nil {
|
|
||||||
expectedStatusMap[c.Name].Running.StartedAt = containerStatus.State.Running.StartedAt
|
|
||||||
}
|
|
||||||
assert.Equal(t, expectedStatusMap[c.Name], containerStatus.State, "for container %s", c.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fakeDocker.Lock()
|
|
||||||
defer fakeDocker.Unlock()
|
|
||||||
|
|
||||||
pulledImageSorted := puller.ImagesPulled[:]
|
|
||||||
sort.Strings(pulledImageSorted)
|
|
||||||
assert.Equal(t, []string{"pod_infra_image", "pull_always_image", "pull_if_not_present_image"}, pulledImageSorted)
|
|
||||||
|
|
||||||
if len(fakeDocker.Created) != 5 {
|
|
||||||
t.Errorf("Unexpected containers created %v", fakeDocker.Created)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSyncPodWithRestartPolicy(t *testing.T) {
|
func TestSyncPodWithRestartPolicy(t *testing.T) {
|
||||||
dm, fakeDocker := newTestDockerManager()
|
dm, fakeDocker := newTestDockerManager()
|
||||||
containers := []api.Container{
|
containers := []api.Container{
|
||||||
@ -1058,112 +996,6 @@ func TestSyncPodWithRestartPolicy(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetAPIPodStatusWithLastTermination(t *testing.T) {
|
|
||||||
dm, fakeDocker := newTestDockerManager()
|
|
||||||
containers := []api.Container{
|
|
||||||
{Name: "succeeded"},
|
|
||||||
{Name: "failed"},
|
|
||||||
}
|
|
||||||
|
|
||||||
pod := &api.Pod{
|
|
||||||
ObjectMeta: api.ObjectMeta{
|
|
||||||
UID: "12345678",
|
|
||||||
Name: "foo",
|
|
||||||
Namespace: "new",
|
|
||||||
},
|
|
||||||
Spec: api.PodSpec{
|
|
||||||
Containers: containers,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
dockerContainers := []*docker.Container{
|
|
||||||
{
|
|
||||||
ID: "9876",
|
|
||||||
Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0",
|
|
||||||
State: docker.State{
|
|
||||||
StartedAt: time.Now(),
|
|
||||||
FinishedAt: time.Now(),
|
|
||||||
Running: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ID: "1234",
|
|
||||||
Name: "/k8s_succeeded." + strconv.FormatUint(kubecontainer.HashContainer(&containers[0]), 16) + "_foo_new_12345678_0",
|
|
||||||
State: docker.State{
|
|
||||||
ExitCode: 0,
|
|
||||||
StartedAt: time.Now(),
|
|
||||||
FinishedAt: time.Now(),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ID: "5678",
|
|
||||||
Name: "/k8s_failed." + strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_foo_new_12345678_0",
|
|
||||||
State: docker.State{
|
|
||||||
ExitCode: 42,
|
|
||||||
StartedAt: time.Now(),
|
|
||||||
FinishedAt: time.Now(),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
tests := []struct {
|
|
||||||
policy api.RestartPolicy
|
|
||||||
created []string
|
|
||||||
stopped []string
|
|
||||||
lastTerminations []string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
api.RestartPolicyAlways,
|
|
||||||
[]string{"succeeded", "failed"},
|
|
||||||
[]string{},
|
|
||||||
[]string{"docker://1234", "docker://5678"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
api.RestartPolicyOnFailure,
|
|
||||||
[]string{"failed"},
|
|
||||||
[]string{},
|
|
||||||
[]string{"docker://5678"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
api.RestartPolicyNever,
|
|
||||||
[]string{},
|
|
||||||
[]string{"9876"},
|
|
||||||
[]string{},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, tt := range tests {
|
|
||||||
fakeDocker.SetFakeContainers(dockerContainers)
|
|
||||||
fakeDocker.ClearCalls()
|
|
||||||
pod.Spec.RestartPolicy = tt.policy
|
|
||||||
runSyncPod(t, dm, fakeDocker, pod, nil, false)
|
|
||||||
|
|
||||||
// Check if we can retrieve the pod status.
|
|
||||||
status, err := dm.GetAPIPodStatus(pod)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error %v", err)
|
|
||||||
}
|
|
||||||
terminatedContainers := []string{}
|
|
||||||
for _, cs := range status.ContainerStatuses {
|
|
||||||
if cs.LastTerminationState.Terminated != nil {
|
|
||||||
terminatedContainers = append(terminatedContainers, cs.LastTerminationState.Terminated.ContainerID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sort.StringSlice(terminatedContainers).Sort()
|
|
||||||
sort.StringSlice(tt.lastTerminations).Sort()
|
|
||||||
if !reflect.DeepEqual(terminatedContainers, tt.lastTerminations) {
|
|
||||||
t.Errorf("Expected(sorted): %#v, Actual(sorted): %#v", tt.lastTerminations, terminatedContainers)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := fakeDocker.AssertCreated(tt.created); err != nil {
|
|
||||||
t.Errorf("%d: %v", i, err)
|
|
||||||
}
|
|
||||||
if err := fakeDocker.AssertStopped(tt.stopped); err != nil {
|
|
||||||
t.Errorf("%d: %v", i, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSyncPodBackoff(t *testing.T) {
|
func TestSyncPodBackoff(t *testing.T) {
|
||||||
var fakeClock = util.NewFakeClock(time.Now())
|
var fakeClock = util.NewFakeClock(time.Now())
|
||||||
startTime := fakeClock.Now()
|
startTime := fakeClock.Now()
|
||||||
@ -1253,101 +1085,10 @@ func TestSyncPodBackoff(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
func TestGetPodCreationFailureReason(t *testing.T) {
|
|
||||||
dm, fakeDocker := newTestDockerManager()
|
|
||||||
// Inject the creation failure error to docker.
|
|
||||||
failureReason := "RunContainerError"
|
|
||||||
fakeDocker.Errors = map[string]error{
|
|
||||||
"create": fmt.Errorf("%s", failureReason),
|
|
||||||
}
|
|
||||||
|
|
||||||
pod := &api.Pod{
|
|
||||||
ObjectMeta: api.ObjectMeta{
|
|
||||||
UID: "12345678",
|
|
||||||
Name: "foo",
|
|
||||||
Namespace: "new",
|
|
||||||
},
|
|
||||||
Spec: api.PodSpec{
|
|
||||||
Containers: []api.Container{{Name: "bar"}},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Pretend that the pod infra container has already been created, so that
|
|
||||||
// we can run the user containers.
|
|
||||||
fakeDocker.SetFakeRunningContainers([]*docker.Container{{
|
|
||||||
ID: "9876",
|
|
||||||
Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0",
|
|
||||||
}})
|
|
||||||
|
|
||||||
runSyncPod(t, dm, fakeDocker, pod, nil, true)
|
|
||||||
// Check if we can retrieve the pod status.
|
|
||||||
status, err := dm.GetAPIPodStatus(pod)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(status.ContainerStatuses) < 1 {
|
|
||||||
t.Errorf("expected 1 container status, got %d", len(status.ContainerStatuses))
|
|
||||||
} else {
|
|
||||||
state := status.ContainerStatuses[0].State
|
|
||||||
if state.Waiting == nil {
|
|
||||||
t.Errorf("expected waiting state, got %#v", state)
|
|
||||||
} else if state.Waiting.Reason != failureReason {
|
|
||||||
t.Errorf("expected reason %q, got %q", failureReason, state.Waiting.Reason)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetPodPullImageFailureReason(t *testing.T) {
|
|
||||||
dm, fakeDocker := newTestDockerManager()
|
|
||||||
// Initialize the FakeDockerPuller so that it'd try to pull non-existent
|
|
||||||
// images.
|
|
||||||
puller := dm.dockerPuller.(*FakeDockerPuller)
|
|
||||||
puller.HasImages = []string{}
|
|
||||||
// Inject the pull image failure error.
|
|
||||||
failureReason := kubecontainer.ErrImagePull.Error()
|
|
||||||
puller.ErrorsToInject = []error{fmt.Errorf("%s", failureReason)}
|
|
||||||
|
|
||||||
pod := &api.Pod{
|
|
||||||
ObjectMeta: api.ObjectMeta{
|
|
||||||
UID: "12345678",
|
|
||||||
Name: "foo",
|
|
||||||
Namespace: "new",
|
|
||||||
},
|
|
||||||
Spec: api.PodSpec{
|
|
||||||
Containers: []api.Container{{Name: "bar", Image: "realImage", ImagePullPolicy: api.PullAlways}},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
// Pretend that the pod infra container has already been created, so that
|
|
||||||
// we can run the user containers.
|
|
||||||
fakeDocker.SetFakeRunningContainers([]*docker.Container{{
|
|
||||||
ID: "9876",
|
|
||||||
Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0",
|
|
||||||
}})
|
|
||||||
runSyncPod(t, dm, fakeDocker, pod, nil, true)
|
|
||||||
// Check if we can retrieve the pod status.
|
|
||||||
status, err := dm.GetAPIPodStatus(pod)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(status.ContainerStatuses) < 1 {
|
|
||||||
t.Errorf("expected 1 container status, got %d", len(status.ContainerStatuses))
|
|
||||||
} else {
|
|
||||||
state := status.ContainerStatuses[0].State
|
|
||||||
if state.Waiting == nil {
|
|
||||||
t.Errorf("expected waiting state, got %#v", state)
|
|
||||||
} else if state.Waiting.Reason != failureReason {
|
|
||||||
t.Errorf("expected reason %q, got %q", failureReason, state.Waiting.Reason)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetRestartCount(t *testing.T) {
|
func TestGetRestartCount(t *testing.T) {
|
||||||
dm, fakeDocker := newTestDockerManager()
|
dm, fakeDocker := newTestDockerManager()
|
||||||
containers := []api.Container{
|
containerName := "bar"
|
||||||
{Name: "bar"},
|
|
||||||
}
|
|
||||||
pod := api.Pod{
|
pod := api.Pod{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
UID: "12345678",
|
UID: "12345678",
|
||||||
@ -1355,67 +1096,83 @@ func TestGetRestartCount(t *testing.T) {
|
|||||||
Namespace: "new",
|
Namespace: "new",
|
||||||
},
|
},
|
||||||
Spec: api.PodSpec{
|
Spec: api.PodSpec{
|
||||||
Containers: containers,
|
Containers: []api.Container{
|
||||||
|
{Name: containerName},
|
||||||
|
},
|
||||||
RestartPolicy: "Always",
|
RestartPolicy: "Always",
|
||||||
},
|
},
|
||||||
|
Status: api.PodStatus{
|
||||||
|
ContainerStatuses: []api.ContainerStatus{
|
||||||
|
{
|
||||||
|
Name: containerName,
|
||||||
|
RestartCount: 3,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// Helper function for verifying the restart count.
|
// Helper function for verifying the restart count.
|
||||||
verifyRestartCount := func(pod *api.Pod, expectedCount int) api.PodStatus {
|
verifyRestartCount := func(pod *api.Pod, expectedCount int) {
|
||||||
runSyncPod(t, dm, fakeDocker, pod, nil, false)
|
runSyncPod(t, dm, fakeDocker, pod, nil, false)
|
||||||
status, err := dm.GetAPIPodStatus(pod)
|
status, err := dm.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Unexpected error %v", err)
|
t.Fatalf("Unexpected error %v", err)
|
||||||
}
|
}
|
||||||
restartCount := status.ContainerStatuses[0].RestartCount
|
cs := status.FindContainerStatusByName(containerName)
|
||||||
|
if cs == nil {
|
||||||
|
t.Fatal("Can't find status for container %q", containerName)
|
||||||
|
}
|
||||||
|
restartCount := cs.RestartCount
|
||||||
if restartCount != expectedCount {
|
if restartCount != expectedCount {
|
||||||
t.Errorf("expected %d restart count, got %d", expectedCount, restartCount)
|
t.Errorf("expected %d restart count, got %d", expectedCount, restartCount)
|
||||||
}
|
}
|
||||||
return *status
|
|
||||||
}
|
}
|
||||||
|
|
||||||
killOneContainer := func(pod *api.Pod) {
|
killOneContainer := func(pod *api.Pod) {
|
||||||
status, err := dm.GetAPIPodStatus(pod)
|
status, err := dm.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Unexpected error %v", err)
|
t.Fatalf("Unexpected error %v", err)
|
||||||
}
|
}
|
||||||
containerID := kubecontainer.ParseContainerID(status.ContainerStatuses[0].ContainerID)
|
cs := status.FindContainerStatusByName(containerName)
|
||||||
dm.KillContainerInPod(containerID, &pod.Spec.Containers[0], pod, "test container restart count.")
|
if cs == nil {
|
||||||
|
t.Fatal("Can't find status for container %q", containerName)
|
||||||
|
}
|
||||||
|
dm.KillContainerInPod(cs.ID, &pod.Spec.Containers[0], pod, "test container restart count.")
|
||||||
}
|
}
|
||||||
// Container "bar" starts the first time.
|
// Container "bar" starts the first time.
|
||||||
// TODO: container lists are expected to be sorted reversely by time.
|
// TODO: container lists are expected to be sorted reversely by time.
|
||||||
// We should fix FakeDockerClient to sort the list before returning.
|
// We should fix FakeDockerClient to sort the list before returning.
|
||||||
// (randome-liu) Just partially sorted now.
|
// (randome-liu) Just partially sorted now.
|
||||||
pod.Status = verifyRestartCount(&pod, 0)
|
verifyRestartCount(&pod, 0)
|
||||||
killOneContainer(&pod)
|
killOneContainer(&pod)
|
||||||
|
|
||||||
// Poor container "bar" has been killed, and should be restarted with restart count 1
|
// Poor container "bar" has been killed, and should be restarted with restart count 1
|
||||||
pod.Status = verifyRestartCount(&pod, 1)
|
verifyRestartCount(&pod, 1)
|
||||||
killOneContainer(&pod)
|
killOneContainer(&pod)
|
||||||
|
|
||||||
// Poor container "bar" has been killed again, and should be restarted with restart count 2
|
// Poor container "bar" has been killed again, and should be restarted with restart count 2
|
||||||
pod.Status = verifyRestartCount(&pod, 2)
|
verifyRestartCount(&pod, 2)
|
||||||
killOneContainer(&pod)
|
killOneContainer(&pod)
|
||||||
|
|
||||||
// Poor container "bar" has been killed again ang again, and should be restarted with restart count 3
|
// Poor container "bar" has been killed again ang again, and should be restarted with restart count 3
|
||||||
pod.Status = verifyRestartCount(&pod, 3)
|
verifyRestartCount(&pod, 3)
|
||||||
|
|
||||||
// The oldest container has been garbage collected
|
// The oldest container has been garbage collected
|
||||||
exitedContainers := fakeDocker.ExitedContainerList
|
exitedContainers := fakeDocker.ExitedContainerList
|
||||||
fakeDocker.ExitedContainerList = exitedContainers[:len(exitedContainers)-1]
|
fakeDocker.ExitedContainerList = exitedContainers[:len(exitedContainers)-1]
|
||||||
pod.Status = verifyRestartCount(&pod, 3)
|
verifyRestartCount(&pod, 3)
|
||||||
|
|
||||||
// The last two oldest containers have been garbage collected
|
// The last two oldest containers have been garbage collected
|
||||||
fakeDocker.ExitedContainerList = exitedContainers[:len(exitedContainers)-2]
|
fakeDocker.ExitedContainerList = exitedContainers[:len(exitedContainers)-2]
|
||||||
pod.Status = verifyRestartCount(&pod, 3)
|
verifyRestartCount(&pod, 3)
|
||||||
|
|
||||||
// All exited containers have been garbage collected
|
// All exited containers have been garbage collected, restart count should be got from old api pod status
|
||||||
fakeDocker.ExitedContainerList = []docker.APIContainers{}
|
fakeDocker.ExitedContainerList = []docker.APIContainers{}
|
||||||
pod.Status = verifyRestartCount(&pod, 3)
|
verifyRestartCount(&pod, 3)
|
||||||
killOneContainer(&pod)
|
killOneContainer(&pod)
|
||||||
|
|
||||||
// Poor container "bar" has been killed again ang again and again, and should be restarted with restart count 4
|
// Poor container "bar" has been killed again ang again and again, and should be restarted with restart count 4
|
||||||
pod.Status = verifyRestartCount(&pod, 4)
|
verifyRestartCount(&pod, 4)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetTerminationMessagePath(t *testing.T) {
|
func TestGetTerminationMessagePath(t *testing.T) {
|
||||||
@ -1686,57 +1443,6 @@ func TestSyncPodWithHostNetwork(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetAPIPodStatusSortedContainers(t *testing.T) {
|
|
||||||
dm, fakeDocker := newTestDockerManager()
|
|
||||||
specContainerList := []api.Container{}
|
|
||||||
expectedOrder := []string{}
|
|
||||||
|
|
||||||
numContainers := 10
|
|
||||||
podName := "foo"
|
|
||||||
podNs := "test"
|
|
||||||
podUID := "uid1"
|
|
||||||
fakeConfig := &docker.Config{
|
|
||||||
Image: "some:latest",
|
|
||||||
}
|
|
||||||
|
|
||||||
dockerContainers := []*docker.Container{}
|
|
||||||
for i := 0; i < numContainers; i++ {
|
|
||||||
id := fmt.Sprintf("%v", i)
|
|
||||||
containerName := fmt.Sprintf("%vcontainer", id)
|
|
||||||
expectedOrder = append(expectedOrder, containerName)
|
|
||||||
dockerContainers = append(dockerContainers, &docker.Container{
|
|
||||||
ID: id,
|
|
||||||
Name: fmt.Sprintf("/k8s_%v_%v_%v_%v_42", containerName, podName, podNs, podUID),
|
|
||||||
Config: fakeConfig,
|
|
||||||
Image: fmt.Sprintf("%vimageid", id),
|
|
||||||
})
|
|
||||||
specContainerList = append(specContainerList, api.Container{Name: containerName})
|
|
||||||
}
|
|
||||||
fakeDocker.SetFakeRunningContainers(dockerContainers)
|
|
||||||
fakeDocker.ClearCalls()
|
|
||||||
pod := &api.Pod{
|
|
||||||
ObjectMeta: api.ObjectMeta{
|
|
||||||
UID: types.UID(podUID),
|
|
||||||
Name: podName,
|
|
||||||
Namespace: podNs,
|
|
||||||
},
|
|
||||||
Spec: api.PodSpec{
|
|
||||||
Containers: specContainerList,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for i := 0; i < 5; i++ {
|
|
||||||
status, err := dm.GetAPIPodStatus(pod)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error %v", err)
|
|
||||||
}
|
|
||||||
for i, c := range status.ContainerStatuses {
|
|
||||||
if expectedOrder[i] != c.Name {
|
|
||||||
t.Fatalf("Container status not sorted, expected %v at index %d, but found %v", expectedOrder[i], i, c.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestVerifyNonRoot(t *testing.T) {
|
func TestVerifyNonRoot(t *testing.T) {
|
||||||
dm, fakeDocker := newTestDockerManager()
|
dm, fakeDocker := newTestDockerManager()
|
||||||
|
|
||||||
@ -1898,3 +1604,5 @@ func TestGetIPCMode(t *testing.T) {
|
|||||||
t.Errorf("expected host ipc mode for pod but got %v", ipcMode)
|
t.Errorf("expected host ipc mode for pod but got %v", ipcMode)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO(random-liu): Add unit test for returned PodSyncResult (issue #20478)
|
||||||
|
@ -1629,10 +1629,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, podStatus *kubecont
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
apiPodStatus, err := kl.generatePodStatus(pod, podStatus)
|
apiPodStatus := kl.generatePodStatus(pod, podStatus)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// Record the time it takes for the pod to become running.
|
// Record the time it takes for the pod to become running.
|
||||||
existingStatus, ok := kl.statusManager.GetPodStatus(pod.UID)
|
existingStatus, ok := kl.statusManager.GetPodStatus(pod.UID)
|
||||||
if !ok || existingStatus.Phase == api.PodPending && apiPodStatus.Phase == api.PodRunning &&
|
if !ok || existingStatus.Phase == api.PodPending && apiPodStatus.Phase == api.PodRunning &&
|
||||||
@ -1836,11 +1833,12 @@ func (kl *Kubelet) cleanupBandwidthLimits(allPods []*api.Pod) error {
|
|||||||
}
|
}
|
||||||
status, found := kl.statusManager.GetPodStatus(pod.UID)
|
status, found := kl.statusManager.GetPodStatus(pod.UID)
|
||||||
if !found {
|
if !found {
|
||||||
statusPtr, err := kl.containerRuntime.GetAPIPodStatus(pod)
|
// TODO(random-liu): Cleanup status get functions. (issue #20477)
|
||||||
|
s, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
status = *statusPtr
|
status = kl.generatePodStatus(pod, s)
|
||||||
}
|
}
|
||||||
if status.Phase == api.PodRunning {
|
if status.Phase == api.PodRunning {
|
||||||
possibleCIDRs.Insert(fmt.Sprintf("%s/32", status.PodIP))
|
possibleCIDRs.Insert(fmt.Sprintf("%s/32", status.PodIP))
|
||||||
@ -3138,7 +3136,7 @@ func (kl *Kubelet) getRuntimePodStatus(pod *api.Pod) (*kubecontainer.PodStatus,
|
|||||||
return kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
|
return kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (kl *Kubelet) generatePodStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) (api.PodStatus, error) {
|
func (kl *Kubelet) generatePodStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) api.PodStatus {
|
||||||
glog.V(3).Infof("Generating status for %q", format.Pod(pod))
|
glog.V(3).Infof("Generating status for %q", format.Pod(pod))
|
||||||
// TODO: Consider include the container information.
|
// TODO: Consider include the container information.
|
||||||
if kl.pastActiveDeadline(pod) {
|
if kl.pastActiveDeadline(pod) {
|
||||||
@ -3147,7 +3145,7 @@ func (kl *Kubelet) generatePodStatus(pod *api.Pod, podStatus *kubecontainer.PodS
|
|||||||
return api.PodStatus{
|
return api.PodStatus{
|
||||||
Phase: api.PodFailed,
|
Phase: api.PodFailed,
|
||||||
Reason: reason,
|
Reason: reason,
|
||||||
Message: "Pod was active on the node longer than specified deadline"}, nil
|
Message: "Pod was active on the node longer than specified deadline"}
|
||||||
}
|
}
|
||||||
|
|
||||||
s := kl.convertStatusToAPIStatus(pod, podStatus)
|
s := kl.convertStatusToAPIStatus(pod, podStatus)
|
||||||
@ -3170,7 +3168,7 @@ func (kl *Kubelet) generatePodStatus(pod *api.Pod, podStatus *kubecontainer.PodS
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return *s, nil
|
return *s
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO(random-liu): Move this to some better place.
|
// TODO(random-liu): Move this to some better place.
|
||||||
|
@ -4220,13 +4220,13 @@ func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCleanupBandwidthLimits(t *testing.T) {
|
func TestCleanupBandwidthLimits(t *testing.T) {
|
||||||
|
// TODO(random-liu): We removed the test case for pod status not cached here. We should add a higher
|
||||||
|
// layer status getter function and test that function instead.
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
status *api.PodStatus
|
status *api.PodStatus
|
||||||
pods []*api.Pod
|
pods []*api.Pod
|
||||||
inputCIDRs []string
|
inputCIDRs []string
|
||||||
expectResetCIDRs []string
|
expectResetCIDRs []string
|
||||||
cacheStatus bool
|
|
||||||
expectedCalls []string
|
|
||||||
name string
|
name string
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
@ -4251,35 +4251,8 @@ func TestCleanupBandwidthLimits(t *testing.T) {
|
|||||||
},
|
},
|
||||||
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
|
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
|
||||||
expectResetCIDRs: []string{"2.3.4.5/32", "5.6.7.8/32"},
|
expectResetCIDRs: []string{"2.3.4.5/32", "5.6.7.8/32"},
|
||||||
expectedCalls: []string{"GetAPIPodStatus"},
|
|
||||||
name: "pod running",
|
name: "pod running",
|
||||||
},
|
},
|
||||||
{
|
|
||||||
status: &api.PodStatus{
|
|
||||||
PodIP: "1.2.3.4",
|
|
||||||
Phase: api.PodRunning,
|
|
||||||
},
|
|
||||||
pods: []*api.Pod{
|
|
||||||
{
|
|
||||||
ObjectMeta: api.ObjectMeta{
|
|
||||||
Name: "foo",
|
|
||||||
Annotations: map[string]string{
|
|
||||||
"kubernetes.io/ingress-bandwidth": "10M",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ObjectMeta: api.ObjectMeta{
|
|
||||||
Name: "bar",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
|
|
||||||
expectResetCIDRs: []string{"2.3.4.5/32", "5.6.7.8/32"},
|
|
||||||
expectedCalls: []string{},
|
|
||||||
cacheStatus: true,
|
|
||||||
name: "pod running with cache",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
status: &api.PodStatus{
|
status: &api.PodStatus{
|
||||||
PodIP: "1.2.3.4",
|
PodIP: "1.2.3.4",
|
||||||
@ -4302,7 +4275,6 @@ func TestCleanupBandwidthLimits(t *testing.T) {
|
|||||||
},
|
},
|
||||||
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
|
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
|
||||||
expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
|
expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
|
||||||
expectedCalls: []string{"GetAPIPodStatus"},
|
|
||||||
name: "pod not running",
|
name: "pod not running",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -4310,32 +4282,6 @@ func TestCleanupBandwidthLimits(t *testing.T) {
|
|||||||
PodIP: "1.2.3.4",
|
PodIP: "1.2.3.4",
|
||||||
Phase: api.PodFailed,
|
Phase: api.PodFailed,
|
||||||
},
|
},
|
||||||
pods: []*api.Pod{
|
|
||||||
{
|
|
||||||
ObjectMeta: api.ObjectMeta{
|
|
||||||
Name: "foo",
|
|
||||||
Annotations: map[string]string{
|
|
||||||
"kubernetes.io/ingress-bandwidth": "10M",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ObjectMeta: api.ObjectMeta{
|
|
||||||
Name: "bar",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
|
|
||||||
expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
|
|
||||||
expectedCalls: []string{},
|
|
||||||
cacheStatus: true,
|
|
||||||
name: "pod not running with cache",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
status: &api.PodStatus{
|
|
||||||
PodIP: "1.2.3.4",
|
|
||||||
Phase: api.PodRunning,
|
|
||||||
},
|
|
||||||
pods: []*api.Pod{
|
pods: []*api.Pod{
|
||||||
{
|
{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
@ -4360,12 +4306,9 @@ func TestCleanupBandwidthLimits(t *testing.T) {
|
|||||||
|
|
||||||
testKube := newTestKubelet(t)
|
testKube := newTestKubelet(t)
|
||||||
testKube.kubelet.shaper = shaper
|
testKube.kubelet.shaper = shaper
|
||||||
testKube.fakeRuntime.APIPodStatus = *test.status
|
|
||||||
|
|
||||||
if test.cacheStatus {
|
for _, pod := range test.pods {
|
||||||
for _, pod := range test.pods {
|
testKube.kubelet.statusManager.SetPodStatus(pod, *test.status)
|
||||||
testKube.kubelet.statusManager.SetPodStatus(pod, *test.status)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
err := testKube.kubelet.cleanupBandwidthLimits(test.pods)
|
err := testKube.kubelet.cleanupBandwidthLimits(test.pods)
|
||||||
@ -4375,14 +4318,6 @@ func TestCleanupBandwidthLimits(t *testing.T) {
|
|||||||
if !reflect.DeepEqual(shaper.ResetCIDRs, test.expectResetCIDRs) {
|
if !reflect.DeepEqual(shaper.ResetCIDRs, test.expectResetCIDRs) {
|
||||||
t.Errorf("[%s]\nexpected: %v, saw: %v", test.name, test.expectResetCIDRs, shaper.ResetCIDRs)
|
t.Errorf("[%s]\nexpected: %v, saw: %v", test.name, test.expectResetCIDRs, shaper.ResetCIDRs)
|
||||||
}
|
}
|
||||||
|
|
||||||
if test.cacheStatus {
|
|
||||||
if len(testKube.fakeRuntime.CalledFunctions) != 0 {
|
|
||||||
t.Errorf("unexpected function calls: %v", testKube.fakeRuntime.CalledFunctions)
|
|
||||||
}
|
|
||||||
} else if !reflect.DeepEqual(testKube.fakeRuntime.CalledFunctions, test.expectedCalls) {
|
|
||||||
t.Errorf("[%s], expected %v, saw %v", test.name, test.expectedCalls, testKube.fakeRuntime.CalledFunctions)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -4515,3 +4450,5 @@ func TestGetPodsToSync(t *testing.T) {
|
|||||||
t.Errorf("expected %d pods to sync, got %d", 3, len(podsToSync))
|
t.Errorf("expected %d pods to sync, got %d", 3, len(podsToSync))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO(random-liu): Add unit test for convertStatusToAPIStatus (issue #20478)
|
||||||
|
@ -25,6 +25,7 @@ import (
|
|||||||
"k8s.io/kubernetes/pkg/api"
|
"k8s.io/kubernetes/pkg/api"
|
||||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||||
|
"k8s.io/kubernetes/pkg/types"
|
||||||
"k8s.io/kubernetes/pkg/util/intstr"
|
"k8s.io/kubernetes/pkg/util/intstr"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -35,7 +36,7 @@ type HandlerRunner struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type podStatusProvider interface {
|
type podStatusProvider interface {
|
||||||
GetAPIPodStatus(pod *api.Pod) (*api.PodStatus, error)
|
GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewHandlerRunner(httpGetter kubetypes.HttpGetter, commandRunner kubecontainer.ContainerCommandRunner, containerManager podStatusProvider) kubecontainer.HandlerRunner {
|
func NewHandlerRunner(httpGetter kubetypes.HttpGetter, commandRunner kubecontainer.ContainerCommandRunner, containerManager podStatusProvider) kubecontainer.HandlerRunner {
|
||||||
@ -86,15 +87,15 @@ func resolvePort(portReference intstr.IntOrString, container *api.Container) (in
|
|||||||
func (hr *HandlerRunner) runHTTPHandler(pod *api.Pod, container *api.Container, handler *api.Handler) error {
|
func (hr *HandlerRunner) runHTTPHandler(pod *api.Pod, container *api.Container, handler *api.Handler) error {
|
||||||
host := handler.HTTPGet.Host
|
host := handler.HTTPGet.Host
|
||||||
if len(host) == 0 {
|
if len(host) == 0 {
|
||||||
status, err := hr.containerManager.GetAPIPodStatus(pod)
|
status, err := hr.containerManager.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.Errorf("Unable to get pod info, event handlers may be invalid.")
|
glog.Errorf("Unable to get pod info, event handlers may be invalid.")
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if status.PodIP == "" {
|
if status.IP == "" {
|
||||||
return fmt.Errorf("failed to find networking container: %v", status)
|
return fmt.Errorf("failed to find networking container: %v", status)
|
||||||
}
|
}
|
||||||
host = status.PodIP
|
host = status.IP
|
||||||
}
|
}
|
||||||
var port int
|
var port int
|
||||||
if handler.HTTPGet.Port.Type == intstr.String && len(handler.HTTPGet.Port.StrVal) == 0 {
|
if handler.HTTPGet.Port.Type == intstr.String && len(handler.HTTPGet.Port.StrVal) == 0 {
|
||||||
|
@ -1054,16 +1054,6 @@ func (r *Runtime) KillPod(pod *api.Pod, runningPod kubecontainer.Pod) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetAPIPodStatus returns the status of the given pod.
|
|
||||||
func (r *Runtime) GetAPIPodStatus(pod *api.Pod) (*api.PodStatus, error) {
|
|
||||||
// Get the pod status.
|
|
||||||
podStatus, err := r.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return r.ConvertPodStatusToAPIPodStatus(pod, podStatus)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Runtime) Type() string {
|
func (r *Runtime) Type() string {
|
||||||
return RktType
|
return RktType
|
||||||
}
|
}
|
||||||
|
@ -23,7 +23,7 @@ import (
|
|||||||
|
|
||||||
"github.com/golang/glog"
|
"github.com/golang/glog"
|
||||||
"k8s.io/kubernetes/pkg/api"
|
"k8s.io/kubernetes/pkg/api"
|
||||||
"k8s.io/kubernetes/pkg/kubelet/container"
|
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||||
"k8s.io/kubernetes/pkg/kubelet/util/format"
|
"k8s.io/kubernetes/pkg/kubelet/util/format"
|
||||||
)
|
)
|
||||||
@ -89,10 +89,10 @@ func (kl *Kubelet) runOnce(pods []*api.Pod, retryDelay time.Duration) (results [
|
|||||||
results = append(results, res)
|
results = append(results, res)
|
||||||
if res.Err != nil {
|
if res.Err != nil {
|
||||||
// TODO(proppy): report which containers failed the pod.
|
// TODO(proppy): report which containers failed the pod.
|
||||||
glog.Infof("failed to start pod %q: %v", res.Pod.Name, res.Err)
|
glog.Infof("failed to start pod %q: %v", format.Pod(res.Pod), res.Err)
|
||||||
failedPods = append(failedPods, res.Pod.Name)
|
failedPods = append(failedPods, format.Pod(res.Pod))
|
||||||
} else {
|
} else {
|
||||||
glog.Infof("started pod %q", res.Pod.Name)
|
glog.Infof("started pod %q", format.Pod(res.Pod))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if len(failedPods) > 0 {
|
if len(failedPods) > 0 {
|
||||||
@ -107,25 +107,17 @@ func (kl *Kubelet) runPod(pod *api.Pod, retryDelay time.Duration) error {
|
|||||||
delay := retryDelay
|
delay := retryDelay
|
||||||
retry := 0
|
retry := 0
|
||||||
for {
|
for {
|
||||||
pods, err := kl.containerRuntime.GetPods(false)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to get kubelet pods: %v", err)
|
|
||||||
}
|
|
||||||
p := container.Pods(pods).FindPodByID(pod.UID)
|
|
||||||
running, err := kl.isPodRunning(pod, p)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check pod status: %v", err)
|
|
||||||
}
|
|
||||||
if running {
|
|
||||||
glog.Infof("pod %q containers running", pod.Name)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
glog.Infof("pod %q containers not running: syncing", pod.Name)
|
|
||||||
|
|
||||||
status, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
|
status, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.Errorf("Unable to get status for pod %q: %v", pod.Name, err)
|
return fmt.Errorf("Unable to get status for pod %q: %v", format.Pod(pod), err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if kl.isPodRunning(pod, status) {
|
||||||
|
glog.Infof("pod %q containers running", format.Pod(pod))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
glog.Infof("pod %q containers not running: syncing", format.Pod(pod))
|
||||||
|
|
||||||
glog.Infof("Creating a mirror pod for static pod %q", format.Pod(pod))
|
glog.Infof("Creating a mirror pod for static pod %q", format.Pod(pod))
|
||||||
if err := kl.podManager.CreateMirrorPod(pod); err != nil {
|
if err := kl.podManager.CreateMirrorPod(pod); err != nil {
|
||||||
glog.Errorf("Failed creating a mirror pod %q: %v", format.Pod(pod), err)
|
glog.Errorf("Failed creating a mirror pod %q: %v", format.Pod(pod), err)
|
||||||
@ -133,13 +125,13 @@ func (kl *Kubelet) runPod(pod *api.Pod, retryDelay time.Duration) error {
|
|||||||
mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
|
mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
|
||||||
|
|
||||||
if err = kl.syncPod(pod, mirrorPod, status, kubetypes.SyncPodUpdate); err != nil {
|
if err = kl.syncPod(pod, mirrorPod, status, kubetypes.SyncPodUpdate); err != nil {
|
||||||
return fmt.Errorf("error syncing pod: %v", err)
|
return fmt.Errorf("error syncing pod %q: %v", format.Pod(pod), err)
|
||||||
}
|
}
|
||||||
if retry >= runOnceMaxRetries {
|
if retry >= runOnceMaxRetries {
|
||||||
return fmt.Errorf("timeout error: pod %q containers not running after %d retries", pod.Name, runOnceMaxRetries)
|
return fmt.Errorf("timeout error: pod %q containers not running after %d retries", format.Pod(pod), runOnceMaxRetries)
|
||||||
}
|
}
|
||||||
// TODO(proppy): health checking would be better than waiting + checking the state at the next iteration.
|
// TODO(proppy): health checking would be better than waiting + checking the state at the next iteration.
|
||||||
glog.Infof("pod %q containers synced, waiting for %v", pod.Name, delay)
|
glog.Infof("pod %q containers synced, waiting for %v", format.Pod(pod), delay)
|
||||||
time.Sleep(delay)
|
time.Sleep(delay)
|
||||||
retry++
|
retry++
|
||||||
delay *= runOnceRetryDelayBackoff
|
delay *= runOnceRetryDelayBackoff
|
||||||
@ -147,18 +139,13 @@ func (kl *Kubelet) runPod(pod *api.Pod, retryDelay time.Duration) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// isPodRunning returns true if all containers of a manifest are running.
|
// isPodRunning returns true if all containers of a manifest are running.
|
||||||
func (kl *Kubelet) isPodRunning(pod *api.Pod, runningPod container.Pod) (bool, error) {
|
func (kl *Kubelet) isPodRunning(pod *api.Pod, status *kubecontainer.PodStatus) bool {
|
||||||
// TODO(random-liu): Change this to new pod status
|
for _, c := range pod.Spec.Containers {
|
||||||
status, err := kl.containerRuntime.GetAPIPodStatus(pod)
|
cs := status.FindContainerStatusByName(c.Name)
|
||||||
if err != nil {
|
if cs == nil || cs.State != kubecontainer.ContainerStateRunning {
|
||||||
glog.Infof("Failed to get the status of pod %q: %v", format.Pod(pod), err)
|
glog.Infof("Container %q for pod %q not running", c.Name, format.Pod(pod))
|
||||||
return false, err
|
return false
|
||||||
}
|
|
||||||
for _, st := range status.ContainerStatuses {
|
|
||||||
if st.State.Running == nil {
|
|
||||||
glog.Infof("Container %q not running: %#v", st.Name, st.State)
|
|
||||||
return false, nil
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return true, nil
|
return true
|
||||||
}
|
}
|
||||||
|
@ -31,6 +31,7 @@ import (
|
|||||||
"k8s.io/kubernetes/pkg/kubelet/network"
|
"k8s.io/kubernetes/pkg/kubelet/network"
|
||||||
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
|
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
|
||||||
"k8s.io/kubernetes/pkg/kubelet/status"
|
"k8s.io/kubernetes/pkg/kubelet/status"
|
||||||
|
"k8s.io/kubernetes/pkg/util"
|
||||||
utiltesting "k8s.io/kubernetes/pkg/util/testing"
|
utiltesting "k8s.io/kubernetes/pkg/util/testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -67,6 +68,8 @@ func TestRunOnce(t *testing.T) {
|
|||||||
volumeManager: newVolumeManager(),
|
volumeManager: newVolumeManager(),
|
||||||
diskSpaceManager: diskSpaceManager,
|
diskSpaceManager: diskSpaceManager,
|
||||||
containerRuntime: fakeRuntime,
|
containerRuntime: fakeRuntime,
|
||||||
|
reasonCache: NewReasonCache(),
|
||||||
|
clock: util.RealClock{},
|
||||||
}
|
}
|
||||||
kb.containerManager = cm.NewStubContainerManager()
|
kb.containerManager = cm.NewStubContainerManager()
|
||||||
|
|
||||||
@ -90,6 +93,20 @@ func TestRunOnce(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
podManager.SetPods(pods)
|
podManager.SetPods(pods)
|
||||||
|
// The original test here is totally meaningless, because fakeruntime will always return an empty podStatus. While
|
||||||
|
// the originial logic of isPodRunning happens to return true when podstatus is empty, so the test can always pass.
|
||||||
|
// Now the logic in isPodRunning is changed, to let the test pass, we set the podstatus directly in fake runtime.
|
||||||
|
// This is also a meaningless test, because the isPodRunning will also always return true after setting this. However,
|
||||||
|
// because runonce is never used in kubernetes now, we should deprioritize the cleanup work.
|
||||||
|
// TODO(random-liu) Fix the test, make it meaningful.
|
||||||
|
fakeRuntime.PodStatus = kubecontainer.PodStatus{
|
||||||
|
ContainerStatuses: []*kubecontainer.ContainerStatus{
|
||||||
|
{
|
||||||
|
Name: "bar",
|
||||||
|
State: kubecontainer.ContainerStateRunning,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
results, err := kb.runOnce(pods, time.Millisecond)
|
results, err := kb.runOnce(pods, time.Millisecond)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("unexpected error: %v", err)
|
t.Errorf("unexpected error: %v", err)
|
||||||
|
Loading…
Reference in New Issue
Block a user