diff --git a/pkg/kubelet/dockertools/fake_docker_client.go b/pkg/kubelet/dockertools/fake_docker_client.go index 88f68a710a5..cdbb5646191 100644 --- a/pkg/kubelet/dockertools/fake_docker_client.go +++ b/pkg/kubelet/dockertools/fake_docker_client.go @@ -31,19 +31,20 @@ import ( // FakeDockerClient is a simple fake docker client, so that kubelet can be run for testing without requiring a real docker setup. type FakeDockerClient struct { sync.Mutex - ContainerList []docker.APIContainers - Container *docker.Container - ContainerMap map[string]*docker.Container - Image *docker.Image - Images []docker.APIImages - Err error - called []string - Stopped []string - pulled []string - Created []string - Removed []string - RemovedImages util.StringSet - VersionInfo docker.Env + ContainerList []docker.APIContainers + ExitedContainerList []docker.APIContainers + Container *docker.Container + ContainerMap map[string]*docker.Container + Image *docker.Image + Images []docker.APIImages + Err error + called []string + Stopped []string + pulled []string + Created []string + Removed []string + RemovedImages util.StringSet + VersionInfo docker.Env } func (f *FakeDockerClient) ClearCalls() { @@ -67,6 +68,37 @@ func (f *FakeDockerClient) AssertCalls(calls []string) (err error) { return } +func (f *FakeDockerClient) AssertCreated(created []string) error { + f.Lock() + defer f.Unlock() + + actualCreated := []string{} + for _, c := range f.Created { + dockerName, _, err := ParseDockerName(c) + if err != nil { + return fmt.Errorf("unexpected error: %v", err) + } + actualCreated = append(actualCreated, dockerName.ContainerName) + } + sort.StringSlice(created).Sort() + sort.StringSlice(actualCreated).Sort() + if !reflect.DeepEqual(created, actualCreated) { + return fmt.Errorf("expected %#v, got %#v", created, actualCreated) + } + return nil +} + +func (f *FakeDockerClient) AssertStopped(stopped []string) error { + f.Lock() + defer f.Unlock() + sort.StringSlice(stopped).Sort() + 
sort.StringSlice(f.Stopped).Sort() + if !reflect.DeepEqual(stopped, f.Stopped) { + return fmt.Errorf("expected %#v, got %#v", stopped, f.Stopped) + } + return nil +} + func (f *FakeDockerClient) AssertUnorderedCalls(calls []string) (err error) { f.Lock() defer f.Unlock() @@ -91,6 +123,10 @@ func (f *FakeDockerClient) ListContainers(options docker.ListContainersOptions) f.Lock() defer f.Unlock() f.called = append(f.called, "list") + + if options.All { + return append(f.ContainerList, f.ExitedContainerList...), f.Err + } return f.ContainerList, f.Err } diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 8a14fc41ddc..9de5e4af3d8 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -1124,7 +1124,9 @@ func (kl *Kubelet) computePodContainerChanges(pod *api.Pod, runningPod kubeconta createPodInfraContainer = true } - podStatus, err := kl.GetPodStatus(podFullName) + // Do not use the cache here since we need the newest status to check + // if we need to restart the container below. 
+ podStatus, err := kl.generatePodStatus(podFullName) if err != nil { glog.Errorf("Unable to get pod with name %q and uid %q info with error(%v)", podFullName, uid, err) return podContainerChangesSpec{}, err diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 1d5187eccf1..6bd5036e6ab 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -3486,3 +3486,122 @@ func TestHostNetworkDisallowed(t *testing.T) { t.Errorf("expected pod infra creation to fail") } } + +func TestSyncPodsWithRestartPolicy(t *testing.T) { + testKubelet := newTestKubelet(t) + testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil) + kubelet := testKubelet.kubelet + fakeDocker := testKubelet.fakeDocker + waitGroup := testKubelet.waitGroup + + containers := []api.Container{ + {Name: "succeeded"}, + {Name: "failed"}, + } + + runningAPIContainers := []docker.APIContainers{ + { + // pod infra container + Names: []string{"/k8s_POD_foo_new_12345678_0"}, + ID: "9876", + }, + } + exitedAPIContainers := []docker.APIContainers{ + { + // format is // k8s_<container-id>_<pod-fullname>_<pod-uid> + Names: []string{"/k8s_succeeded." + strconv.FormatUint(dockertools.HashContainer(&containers[0]), 16) + "_foo_new_12345678_0"}, + ID: "1234", + }, + { + // format is // k8s_<container-id>_<pod-fullname>_<pod-uid> + Names: []string{"/k8s_failed." 
+ strconv.FormatUint(dockertools.HashContainer(&containers[1]), 16) + "_foo_new_12345678_0"}, + ID: "5678", + }, + } + + containerMap := map[string]*docker.Container{ + "1234": { + ID: "1234", + Name: "succeeded", + Config: &docker.Config{}, + State: docker.State{ + ExitCode: 0, + StartedAt: time.Now(), + FinishedAt: time.Now(), + }, + }, + "5678": { + ID: "5678", + Name: "failed", + Config: &docker.Config{}, + State: docker.State{ + ExitCode: 42, + StartedAt: time.Now(), + FinishedAt: time.Now(), + }, + }, + } + + tests := []struct { + policy api.RestartPolicy + calls []string + created []string + stopped []string + }{ + { + api.RestartPolicyAlways, + []string{"list", "list", "list", "inspect_container", "inspect_container", "inspect_container", "create", "start", "create", "start", "list", "inspect_container", "inspect_container", "inspect_container"}, + []string{"succeeded", "failed"}, + []string{}, + }, + { + api.RestartPolicyOnFailure, + []string{"list", "list", "list", "inspect_container", "inspect_container", "inspect_container", "create", "start", "list", "inspect_container", "inspect_container", "inspect_container"}, + []string{"failed"}, + []string{}, + }, + { + api.RestartPolicyNever, + []string{"list", "list", "list", "inspect_container", "inspect_container", "inspect_container", "stop", "list", "inspect_container", "inspect_container"}, + []string{}, + []string{"9876"}, + }, + } + + for i, tt := range tests { + fakeDocker.ContainerList = runningAPIContainers + fakeDocker.ExitedContainerList = exitedAPIContainers + fakeDocker.ContainerMap = containerMap + fakeDocker.ClearCalls() + pods := []api.Pod{ + { + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: containers, + RestartPolicy: tt.policy, + }, + }, + } + kubelet.podManager.SetPods(pods) + waitGroup.Add(1) + err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now()) + if err != nil { + t.Errorf("%d: unexpected 
error: %v", i, err) + } + waitGroup.Wait() + + // 'stop' is because the pod infra container is killed when no container is running. + verifyCalls(t, fakeDocker, tt.calls) + + if err := fakeDocker.AssertCreated(tt.created); err != nil { + t.Errorf("%d: %v", i, err) + } + if err := fakeDocker.AssertStopped(tt.stopped); err != nil { + t.Errorf("%d: %v", i, err) + } + } +}