Remove redundancy in SyncPods

Author: Tim Hockin
Date:   2014-10-19 21:35:08 -07:00
parent 36a05ee871
commit 76c33a88b6
2 changed files with 8 additions and 13 deletions


@@ -662,12 +662,7 @@ func (kl *Kubelet) SyncPods(pods []api.BoundPod) error {
 	}
 	// Kill any containers we don't need.
-	existingContainers, err := dockertools.GetKubeletDockerContainers(kl.dockerClient, false)
-	if err != nil {
-		glog.Errorf("Error listing containers: %s", err)
-		return err
-	}
-	for _, container := range existingContainers {
+	for _, container := range dockerContainers {
 		// Don't kill containers that are in the desired pods.
 		podFullName, uuid, containerName, _ := dockertools.ParseDockerName(container.Names[0])
 		pc := podContainer{podFullName, uuid, containerName}
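The redundancy removed here is a second container listing: SyncPods already fetched the kubelet-managed containers earlier in the function (dockerContainers), so the kill loop can iterate over that result instead of calling dockertools.GetKubeletDockerContainers again. A self-contained sketch of the resulting shape, using stand-in types and names (containerInfo, listContainers, desired are illustrative, not the kubelet's real API):

package main

import "fmt"

// Stand-in type for this sketch only; the real code iterates Docker
// containers keyed by pod full name, UUID, and container name.
type containerInfo struct {
	id  string
	pod string
}

// syncSketch lists containers once and reuses the result for both the
// create phase and the kill loop, mirroring the shape of the change.
func syncSketch(listContainers func() ([]containerInfo, error), desired map[string]bool) error {
	// Single listing, reused below instead of listing a second time.
	dockerContainers, err := listContainers()
	if err != nil {
		return fmt.Errorf("error listing containers: %v", err)
	}

	// ... create/start whatever is in desired but missing from dockerContainers ...

	// Kill any containers we don't need, iterating over the same listing.
	for _, c := range dockerContainers {
		if !desired[c.pod] {
			fmt.Printf("would kill container %s\n", c.id)
		}
	}
	return nil
}

func main() {
	list := func() ([]containerInfo, error) {
		return []containerInfo{{id: "c1", pod: "pod-a"}, {id: "c2", pod: "pod-b"}}, nil
	}
	_ = syncSketch(list, map[string]bool{"pod-a": true})
}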


@@ -183,7 +183,7 @@ func TestSyncPodsDoesNothing(t *testing.T) {
 		t.Errorf("unexpected error: %v", err)
 	}
-	verifyCalls(t, fakeDocker, []string{"list", "list"})
+	verifyCalls(t, fakeDocker, []string{"list"})
 }
 
 // drainWorkers waits until all workers are done. Should only used for testing.
@@ -231,7 +231,7 @@ func TestSyncPodsCreatesNetAndContainer(t *testing.T) {
 	kubelet.drainWorkers()
 	verifyCalls(t, fakeDocker, []string{
-		"list", "list", "create", "start", "list", "inspect_container", "list", "create", "start"})
+		"list", "create", "start", "list", "inspect_container", "list", "create", "start"})
 	fakeDocker.Lock()
@@ -279,7 +279,7 @@ func TestSyncPodsCreatesNetAndContainerPullsImage(t *testing.T) {
 	kubelet.drainWorkers()
 	verifyCalls(t, fakeDocker, []string{
-		"list", "list", "create", "start", "list", "inspect_container", "list", "create", "start"})
+		"list", "create", "start", "list", "inspect_container", "list", "create", "start"})
 	fakeDocker.Lock()
@@ -324,7 +324,7 @@ func TestSyncPodsWithNetCreatesContainer(t *testing.T) {
 	kubelet.drainWorkers()
 	verifyCalls(t, fakeDocker, []string{
-		"list", "list", "list", "inspect_container", "list", "create", "start"})
+		"list", "list", "inspect_container", "list", "create", "start"})
 	fakeDocker.Lock()
 	if len(fakeDocker.Created) != 1 ||
@@ -376,7 +376,7 @@ func TestSyncPodsWithNetCreatesContainerCallsHandler(t *testing.T) {
 	kubelet.drainWorkers()
 	verifyCalls(t, fakeDocker, []string{
-		"list", "list", "list", "inspect_container", "list", "create", "start"})
+		"list", "list", "inspect_container", "list", "create", "start"})
 	fakeDocker.Lock()
 	if len(fakeDocker.Created) != 1 ||
@@ -418,7 +418,7 @@ func TestSyncPodsDeletesWithNoNetContainer(t *testing.T) {
 	kubelet.drainWorkers()
 	verifyCalls(t, fakeDocker, []string{
-		"list", "list", "stop", "create", "start", "list", "list", "inspect_container", "list", "create", "start"})
+		"list", "stop", "create", "start", "list", "list", "inspect_container", "list", "create", "start"})
 	// A map iteration is used to delete containers, so must not depend on
 	// order here.
@@ -455,7 +455,7 @@ func TestSyncPodsDeletes(t *testing.T) {
 		t.Errorf("unexpected error: %v", err)
 	}
-	verifyCalls(t, fakeDocker, []string{"list", "list", "stop", "stop"})
+	verifyCalls(t, fakeDocker, []string{"list", "stop", "stop"})
 	// A map iteration is used to delete containers, so must not depend on
 	// order here.
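Each expected call list above drops exactly one leading "list", matching the removed second container listing in SyncPods. These tests drive the kubelet against a fake Docker client that records the name of every API call it receives and then compare that recording against the expected sequence. A minimal sketch of that verification pattern (the type and function names below are illustrative stand-ins, not the test file's actual FakeDockerClient and verifyCalls):

package kubelet

import (
	"reflect"
	"testing"
)

// fakeClientSketch records the name of each Docker API call it receives.
// Illustrative stand-in only, not the real fake client.
type fakeClientSketch struct {
	called []string
}

func (f *fakeClientSketch) record(call string) {
	f.called = append(f.called, call)
}

// verifyCallsSketch fails the test if the recorded call sequence differs
// from the expected one.
func verifyCallsSketch(t *testing.T, f *fakeClientSketch, expected []string) {
	if !reflect.DeepEqual(f.called, expected) {
		t.Errorf("expected calls %v, got calls %v", expected, f.called)
	}
}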