vc: move container mount cleanup to container.go

For one thing, these are container-specific resources, so they should not
be cleaned up by the agent. For another, this lets container stop
force-clean these host mountpoints regardless of hypervisor
and agent liveness.

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
This commit is contained in:
Peng Tao 2019-07-21 20:01:04 -07:00
parent e02f6dc067
commit d5d7d82eeb
7 changed files with 32 additions and 20 deletions

View File

@ -254,5 +254,5 @@ type agent interface {
markDead()
// cleanup removes all on disk information generated by the agent
cleanup(id string)
cleanup(s *Sandbox)
}

View File

@ -322,6 +322,9 @@ func TestStartSandboxFailing(t *testing.T) {
}
func TestStopSandboxNoopAgentSuccessful(t *testing.T) {
if tc.NotValid(ktu.NeedRoot()) {
t.Skip(testDisabledAsNonRoot)
}
defer cleanUp()
assert := assert.New(t)
@ -910,6 +913,9 @@ func TestStartContainerFailingSandboxNotStarted(t *testing.T) {
}
func TestStopContainerNoopAgentSuccessful(t *testing.T) {
if tc.NotValid(ktu.NeedRoot()) {
t.Skip(testDisabledAsNonRoot)
}
defer cleanUp()
assert := assert.New(t)
@ -942,6 +948,9 @@ func TestStopContainerNoopAgentSuccessful(t *testing.T) {
}
func TestStopContainerFailingNoSandbox(t *testing.T) {
if tc.NotValid(ktu.NeedRoot()) {
t.Skip(testDisabledAsNonRoot)
}
defer cleanUp()
contID := "100"
@ -951,6 +960,9 @@ func TestStopContainerFailingNoSandbox(t *testing.T) {
}
func TestStopContainerFailingNoContainer(t *testing.T) {
if tc.NotValid(ktu.NeedRoot()) {
t.Skip(testDisabledAsNonRoot)
}
defer cleanUp()
assert := assert.New(t)

View File

@ -1049,6 +1049,14 @@ func (c *Container) stop(force bool) error {
return err
}
if err := c.unmountHostMounts(); err != nil && !force {
return err
}
if err := bindUnmountContainerRootfs(c.ctx, kataHostSharedDir, c.sandbox.id, c.id); err != nil && !force {
return err
}
if err := c.detachDevices(); err != nil && !force {
return err
}

View File

@ -1419,19 +1419,8 @@ func (k *kataAgent) stopContainer(sandbox *Sandbox, c Container) error {
span, _ := k.trace("stopContainer")
defer span.Finish()
req := &grpc.RemoveContainerRequest{
ContainerId: c.id,
}
if _, err := k.sendReq(req); err != nil {
return err
}
if err := c.unmountHostMounts(); err != nil {
return err
}
return bindUnmountContainerRootfs(k.ctx, kataHostSharedDir, sandbox.id, c.id)
_, err := k.sendReq(&grpc.RemoveContainerRequest{ContainerId: c.id})
return err
}
func (k *kataAgent) signalProcess(c *Container, processID string, signal syscall.Signal, all bool) error {
@ -2075,9 +2064,12 @@ func (k *kataAgent) markDead() {
k.disconnect()
}
func (k *kataAgent) cleanup(id string) {
path := k.getSharePath(id)
func (k *kataAgent) cleanup(s *Sandbox) {
path := k.getSharePath(s.id)
k.Logger().WithField("path", path).Infof("cleanup agent")
if err := bindUnmountAllRootfs(k.ctx, path, s); err != nil {
k.Logger().WithError(err).Errorf("failed to unmount container share path %s", path)
}
if err := os.RemoveAll(path); err != nil {
k.Logger().WithError(err).Errorf("failed to cleanup vm share path %s", path)
}

View File

@ -933,8 +933,8 @@ func TestKataCleanupSandbox(t *testing.T) {
err := os.MkdirAll(dir, 0777)
assert.Nil(err)
k := &kataAgent{}
k.cleanup(s.id)
k := &kataAgent{ctx: context.Background()}
k.cleanup(&s)
_, err = os.Stat(dir)
assert.False(os.IsExist(err))

View File

@ -231,5 +231,5 @@ func (n *noopAgent) copyFile(src, dst string) error {
// markDead is a no-op: the noop agent keeps no liveness state to update.
func (n *noopAgent) markDead() {
}
func (n *noopAgent) cleanup(id string) {
// cleanup is a no-op: the noop agent leaves no on-disk state behind for the
// given sandbox, so there is nothing to remove.
func (n *noopAgent) cleanup(s *Sandbox) {
}

View File

@ -764,7 +764,7 @@ func (s *Sandbox) Delete() error {
s.Logger().WithError(err).Error("failed to cleanup hypervisor")
}
s.agent.cleanup(s.id)
s.agent.cleanup(s)
return s.store.Delete()
}