shimv2: shutdown the sandbox when sandbox container exited

Kubelet cleans up the pod cgroup resources and kills the processes
in the pod cgroups when it detects that all of the containers in a pod have
exited, thus shimv2 should close the hypervisor process once the pod sandbox
container has exited; otherwise, the hypervisor process would be killed by
kubelet, making shimv2 fail to shut down the sandbox.

Fixes: #1672

Signed-off-by: lifupan <lifupan@gmail.com>
This commit is contained in:
lifupan 2019-05-22 10:03:24 +08:00 committed by Fupan Li
parent 0d535f56e5
commit f301c957f6
4 changed files with 36 additions and 37 deletions

View File

@ -6,7 +6,6 @@
package containerdshim
import (
"sync"
"time"
"github.com/containerd/containerd/api/types/task"
@ -31,7 +30,6 @@ type container struct {
stderr string
bundle string
cType vc.ContainerType
mu sync.Mutex
exit uint32
status task.Status
terminal bool

View File

@ -17,21 +17,21 @@ import (
)
func deleteContainer(ctx context.Context, s *service, c *container) error {
status, err := s.sandbox.StatusContainer(c.id)
if err != nil {
return err
}
if status.State.State != types.StateStopped {
_, err = s.sandbox.StopContainer(c.id)
if !c.cType.IsSandbox() {
status, err := s.sandbox.StatusContainer(c.id)
if err != nil {
return err
}
}
if status.State.State != types.StateStopped {
_, err = s.sandbox.StopContainer(c.id)
if err != nil {
return err
}
}
_, err = s.sandbox.DeleteContainer(c.id)
if err != nil {
return err
if _, err = s.sandbox.DeleteContainer(c.id); err != nil {
return err
}
}
// Run post-stop OCI hooks.

View File

@ -431,27 +431,10 @@ func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (_ *task
}
if r.ExecID == "" {
err = deleteContainer(ctx, s, c)
if err != nil {
if err = deleteContainer(ctx, s, c); err != nil {
return nil, err
}
// Take care of the use case where it is a sandbox.
// Right after the container representing the sandbox has
// been deleted, let's make sure we stop and delete the
// sandbox.
if c.cType.IsSandbox() {
if err = s.sandbox.Stop(); err != nil {
logrus.WithField("sandbox", s.sandbox.ID()).Error("failed to stop sandbox")
return nil, err
}
if err = s.sandbox.Delete(); err != nil {
logrus.WithField("sandbox", s.sandbox.ID()).Error("failed to delete sandbox")
return nil, err
}
}
s.send(&eventstypes.TaskDelete{
ContainerID: c.id,
Pid: s.pid,

View File

@ -42,23 +42,41 @@ func wait(s *service, c *container, execID string) (int32, error) {
}
timeStamp := time.Now()
c.mu.Lock()
s.mu.Lock()
if execID == "" {
// Take care of the use case where it is a sandbox.
// Right after the container representing the sandbox has
// been deleted, let's make sure we stop and delete the
// sandbox.
if c.cType.IsSandbox() {
if err = s.sandbox.Stop(); err != nil {
logrus.WithField("sandbox", s.sandbox.ID()).Error("failed to stop sandbox")
}
if err = s.sandbox.Delete(); err != nil {
logrus.WithField("sandbox", s.sandbox.ID()).Error("failed to delete sandbox")
}
} else {
if _, err = s.sandbox.StopContainer(c.id); err != nil {
logrus.WithError(err).WithField("container", c.id).Warn("stop container failed")
}
}
c.status = task.StatusStopped
c.exit = uint32(ret)
c.exitTime = timeStamp
c.exitCh <- uint32(ret)
} else {
execs.status = task.StatusStopped
execs.exitCode = ret
execs.exitTime = timeStamp
}
c.mu.Unlock()
if execID == "" {
c.exitCh <- uint32(ret)
} else {
execs.exitCh <- uint32(ret)
}
s.mu.Unlock()
go cReap(s, int(ret), c.id, execID, timeStamp)