mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-23 11:50:44 +00:00
Merge pull request #4076 from brendandburns/fixer
Wait until containers actually finish running before trying to clean up
commit 5fb9009f89
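In short: when SyncPods kills containers that are no longer desired, it now records the IDs it successfully stopped, asks Docker (via the new GetRunningContainers helper) which of those containers are in fact still running, and passes that list into cleanupOrphanedVolumes so the volumes of a still-running container are skipped instead of being torn down underneath it.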
@@ -601,6 +601,23 @@ func ParseDockerName(name string) (podFullName string, podUID types.UID, contain
 	return
 }
 
+func GetRunningContainers(client DockerInterface, ids []string) ([]*docker.Container, error) {
+	result := []*docker.Container{}
+	if client == nil {
+		return nil, fmt.Errorf("unexpected nil docker client.")
+	}
+	for ix := range ids {
+		status, err := client.InspectContainer(ids[ix])
+		if err != nil {
+			return nil, err
+		}
+		if status != nil && status.State.Running {
+			result = append(result, status)
+		}
+	}
+	return result, nil
+}
+
 // Parses image name including a tag and returns image name and tag.
 // TODO: Future Docker versions can parse the tag on daemon side, see
 // https://github.com/dotcloud/docker/issues/6876
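The helper's contract is easiest to see in isolation. Below is a minimal, self-contained sketch that re-implements the same filtering loop against stand-in types; the real code uses docker.Container and docker.State from github.com/fsouza/go-dockerclient plus the package's own DockerInterface, so the type and interface names here are local stand-ins, not the actual API.

package main

import (
	"errors"
	"fmt"
)

// Local stand-ins for the go-dockerclient types, trimmed to the
// fields this helper actually touches.
type State struct{ Running bool }

type Container struct {
	ID    string
	State State
}

// inspector is the one DockerInterface method the helper needs.
type inspector interface {
	InspectContainer(id string) (*Container, error)
}

// getRunningContainers mirrors the new GetRunningContainers: inspect
// each ID and keep only containers Docker still reports as running.
func getRunningContainers(client inspector, ids []string) ([]*Container, error) {
	if client == nil {
		return nil, errors.New("unexpected nil docker client")
	}
	result := []*Container{}
	for _, id := range ids {
		status, err := client.InspectContainer(id)
		if err != nil {
			return nil, err
		}
		if status != nil && status.State.Running {
			result = append(result, status)
		}
	}
	return result, nil
}

// fakeClient serves inspections from a map, in the spirit of the
// FakeDockerClient used by the tests below.
type fakeClient struct{ containers map[string]*Container }

func (f *fakeClient) InspectContainer(id string) (*Container, error) {
	return f.containers[id], nil
}

func main() {
	client := &fakeClient{containers: map[string]*Container{
		"foobar": {ID: "foobar", State: State{Running: false}},
		"baz":    {ID: "baz", State: State{Running: true}},
	}}
	running, err := getRunningContainers(client, []string{"foobar", "baz"})
	if err != nil {
		panic(err)
	}
	for _, c := range running {
		fmt.Println(c.ID)
	}
}

Running it prints only baz, the one ID whose container reports Running: true.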
@@ -327,3 +327,106 @@ func TestIsImagePresent(t *testing.T) {
 		t.Errorf("expected inspection of image abc:123, instead inspected image %v", cl.imageName)
 	}
 }
+
+func TestGetRunningContainers(t *testing.T) {
+	fakeDocker := &FakeDockerClient{}
+	tests := []struct {
+		containers  map[string]*docker.Container
+		inputIDs    []string
+		expectedIDs []string
+		err         error
+	}{
+		{
+			containers: map[string]*docker.Container{
+				"foobar": {
+					ID: "foobar",
+					State: docker.State{
+						Running: false,
+					},
+				},
+				"baz": {
+					ID: "baz",
+					State: docker.State{
+						Running: true,
+					},
+				},
+			},
+			inputIDs:    []string{"foobar", "baz"},
+			expectedIDs: []string{"baz"},
+		},
+		{
+			containers: map[string]*docker.Container{
+				"foobar": {
+					ID: "foobar",
+					State: docker.State{
+						Running: true,
+					},
+				},
+				"baz": {
+					ID: "baz",
+					State: docker.State{
+						Running: true,
+					},
+				},
+			},
+			inputIDs:    []string{"foobar", "baz"},
+			expectedIDs: []string{"foobar", "baz"},
+		},
+		{
+			containers: map[string]*docker.Container{
+				"foobar": {
+					ID: "foobar",
+					State: docker.State{
+						Running: false,
+					},
+				},
+				"baz": {
+					ID: "baz",
+					State: docker.State{
+						Running: false,
+					},
+				},
+			},
+			inputIDs:    []string{"foobar", "baz"},
+			expectedIDs: []string{},
+		},
+		{
+			containers: map[string]*docker.Container{
+				"foobar": {
+					ID: "foobar",
+					State: docker.State{
+						Running: false,
+					},
+				},
+				"baz": {
+					ID: "baz",
+					State: docker.State{
+						Running: false,
+					},
+				},
+			},
+			inputIDs: []string{"foobar", "baz"},
+			err:      fmt.Errorf("test error"),
+		},
+	}
+	for _, test := range tests {
+		fakeDocker.ContainerMap = test.containers
+		fakeDocker.Err = test.err
+		if results, err := GetRunningContainers(fakeDocker, test.inputIDs); err == nil {
+			resultIDs := []string{}
+			for _, result := range results {
+				resultIDs = append(resultIDs, result.ID)
+			}
+			if !reflect.DeepEqual(resultIDs, test.expectedIDs) {
+				t.Errorf("expected: %v, saw: %v", test.expectedIDs, resultIDs)
+			}
+			if err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+		} else {
+			if err != test.err {
+				t.Errorf("unexpected error: %v", err)
+			}
+		}
+	}
+}
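The hunk headers above do not preserve file paths, but assuming the helper and its test live in the kubelet's dockertools package (a guess based on the dockertools.GetRunningContainers call sites below, not stated in this diff), the new table-driven test can be run on its own with:

go test ./pkg/kubelet/dockertools -run TestGetRunningContainers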
@@ -1197,11 +1197,21 @@ func (kl *Kubelet) cleanupOrphanedPods(pods []api.BoundPod) error {
 
 // Compares the map of current volumes to the map of desired volumes.
 // If an active volume does not have a respective desired volume, clean it up.
-func (kl *Kubelet) cleanupOrphanedVolumes(pods []api.BoundPod) error {
+func (kl *Kubelet) cleanupOrphanedVolumes(pods []api.BoundPod, running []*docker.Container) error {
 	desiredVolumes := getDesiredVolumes(pods)
 	currentVolumes := kl.getPodVolumesFromDisk()
+	runningSet := util.StringSet{}
+	for ix := range running {
+		_, uid, _, _ := dockertools.ParseDockerName(running[ix].Name)
+		runningSet.Insert(string(uid))
+	}
 	for name, vol := range currentVolumes {
 		if _, ok := desiredVolumes[name]; !ok {
+			parts := strings.Split(name, "/")
+			if runningSet.Has(parts[0]) {
+				glog.Infof("volume %s, still has a container running %s, skipping teardown", name, parts[0])
+				continue
+			}
 			//TODO (jonesdl) We should somehow differentiate between volumes that are supposed
 			//to be deleted and volumes that are leftover after a crash.
 			glog.Warningf("Orphaned volume %q found, tearing down volume", name)
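The skip guard leans on an implicit naming convention: the keys of the on-disk volume map apparently take the form <podUID>/<volumeName>, so the owning pod's UID is everything before the first slash. That convention is inferred from the split in the hunk above, not stated in the diff; a small illustration with a made-up UID:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical volume key in the "<podUID>/<volumeName>" form the
	// split implies; the UID value here is invented for illustration.
	name := "11053ab6-4a8d-11e4-9f4d-0800279696e1/storage"
	runningSet := map[string]bool{"11053ab6-4a8d-11e4-9f4d-0800279696e1": true}

	uid := strings.Split(name, "/")[0]
	if runningSet[uid] {
		fmt.Printf("volume %s still has a running container %s, skipping teardown\n", name, uid)
	} else {
		fmt.Printf("orphaned volume %q, tearing down\n", name)
	}
}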
@@ -1250,9 +1260,10 @@ func (kl *Kubelet) SyncPods(pods []api.BoundPod) error {
 		})
 	}
 	// Kill any containers we don't need.
-	for _, container := range dockerContainers {
+	killed := []string{}
+	for ix := range dockerContainers {
 		// Don't kill containers that are in the desired pods.
-		podFullName, uid, containerName, _ := dockertools.ParseDockerName(container.Names[0])
+		podFullName, uid, containerName, _ := dockertools.ParseDockerName(dockerContainers[ix].Names[0])
 		if _, found := desiredPods[uid]; found {
 			// syncPod() will handle this one.
 			continue
@@ -1267,15 +1278,23 @@ func (kl *Kubelet) SyncPods(pods []api.BoundPod) error {
 		pc := podContainer{podFullName, uid, containerName}
 		if _, ok := desiredContainers[pc]; !ok {
 			glog.V(1).Infof("Killing unwanted container %+v", pc)
-			err = kl.killContainer(container)
+			err = kl.killContainer(dockerContainers[ix])
 			if err != nil {
 				glog.Errorf("Error killing container %+v: %v", pc, err)
+			} else {
+				killed = append(killed, dockerContainers[ix].ID)
 			}
 		}
 	}
 
+	running, err := dockertools.GetRunningContainers(kl.dockerClient, killed)
+	if err != nil {
+		glog.Errorf("Failed to poll container state: %v", err)
+		return err
+	}
+
 	// Remove any orphaned volumes.
-	err = kl.cleanupOrphanedVolumes(pods)
+	err = kl.cleanupOrphanedVolumes(pods, running)
 	if err != nil {
 		return err
 	}
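Taken together, the two SyncPods hunks enforce an ordering: record which kills succeeded, take one InspectContainer pass over exactly those IDs, and only then clean up volumes. A stripped-down, runnable sketch of that sequence, with stub functions standing in for the kubelet and Docker (the real calls are kl.killContainer, dockertools.GetRunningContainers, and kl.cleanupOrphanedVolumes):

package main

import "fmt"

func killContainer(id string) error { fmt.Println("stopping", id); return nil }

// Pretend the first container ignored the stop and is still running.
func getRunningContainers(ids []string) []string {
	if len(ids) == 0 {
		return nil
	}
	return ids[:1]
}

func cleanupOrphanedVolumes(running []string) {
	fmt.Println("keeping volumes for still-running containers:", running)
}

func main() {
	unwanted := []string{"c1", "c2"}
	// 1. Kill pass: remember only the IDs we actually issued a stop for.
	killed := []string{}
	for _, id := range unwanted {
		if err := killContainer(id); err == nil {
			killed = append(killed, id)
		}
	}
	// 2. Liveness pass: ask which of the killed containers survived.
	running := getRunningContainers(killed)
	// 3. Volume cleanup: survivors' volumes must not be torn down yet.
	cleanupOrphanedVolumes(running)
}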
@@ -681,7 +681,7 @@ func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
 	if err := kubelet.SyncPods([]api.BoundPod{}); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	verifyCalls(t, fakeDocker, []string{"list", "stop", "stop"})
+	verifyCalls(t, fakeDocker, []string{"list", "stop", "stop", "inspect_container", "inspect_container"})
 
 	// A map iteration is used to delete containers, so must not depend on
 	// order here.
@@ -740,7 +740,7 @@ func TestSyncPodsDeletesWhenContainerSourceReady(t *testing.T) {
 	if err := kubelet.SyncPods([]api.BoundPod{}); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	verifyCalls(t, fakeDocker, []string{"list", "stop", "stop"})
+	verifyCalls(t, fakeDocker, []string{"list", "stop", "stop", "inspect_container", "inspect_container"})
 
 	// Validate container for testSource are killed because testSource is reported as seen, but
 	// containers for otherSource are not killed because otherSource has not.
@@ -780,7 +780,7 @@ func TestSyncPodsDeletes(t *testing.T) {
 		t.Errorf("unexpected error: %v", err)
 	}
 
-	verifyCalls(t, fakeDocker, []string{"list", "stop", "stop"})
+	verifyCalls(t, fakeDocker, []string{"list", "stop", "stop", "inspect_container", "inspect_container"})
 
 	// A map iteration is used to delete containers, so must not depend on
 	// order here.