virtcontainers: Keep all filesystem sharing prep code to sandbox.go
With the Linux implementation of the FilesystemSharer interface, we can now remove all host filesystem sharing code from kata_agent and keep it where it belongs: sandbox.go.

Signed-off-by: Samuel Ortiz <s.ortiz@apple.com>
parent 61590bbddc
commit 533c1c0e86
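For orientation before the hunks below: this page shows only the call sites of the FilesystemSharer contract (NewFilesystemShare(sandbox), sandbox.fsShare.Prepare(ctx), sandbox.fsShare.Cleanup(ctx)), not the interface definition itself. The following is a minimal sketch inferred from those call sites; the lowercase type name and the comments are illustrative assumptions, and the real interface in virtcontainers may declare additional methods.

// Sketch only: a subset of the FilesystemSharer contract inferred from this diff.
// The type name and comments here are illustrative, not the upstream definition.
package virtcontainers

import "context"

type filesystemSharerSketch interface {
	// Prepare sets up host-side filesystem sharing for a sandbox (shared
	// directories and sandbox bind mounts) before the agent is configured.
	Prepare(ctx context.Context) error
	// Cleanup tears down whatever Prepare created. The new test below calls
	// both methods twice to check that they are idempotent.
	Cleanup(ctx context.Context) error
}

With that contract, sandbox.go can call Prepare from createSandbox and Cleanup from Delete, which is what the sandbox.go hunks at the bottom of this diff add.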
@@ -176,7 +176,7 @@ type agent interface {
 	markDead(ctx context.Context)

 	// cleanup removes all on disk information generated by the agent
-	cleanup(ctx context.Context, s *Sandbox)
+	cleanup(ctx context.Context)

 	// return data for saving
 	save() persistapi.AgentState
src/runtime/virtcontainers/fs_share_linux_test.go (new file, 102 lines)
@@ -0,0 +1,102 @@
// Copyright (c) 2018 Intel Corporation
// Copyright (c) 2022 Apple Inc.
//
// SPDX-License-Identifier: Apache-2.0
//

package virtcontainers

import (
	"context"
	"os"
	"path"
	"path/filepath"
	"syscall"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSandboxSharedFilesystem(t *testing.T) {
	if os.Getuid() != 0 {
		t.Skip("Test disabled as requires root user")
	}

	assert := assert.New(t)
	// create temporary files to mount:
	testMountPath, err := os.MkdirTemp("", "sandbox-test")
	assert.NoError(err)
	defer os.RemoveAll(testMountPath)

	// create a new shared directory for our test:
	kataHostSharedDirSaved := kataHostSharedDir
	testHostDir, err := os.MkdirTemp("", "kata-Cleanup")
	assert.NoError(err)
	kataHostSharedDir = func() string {
		return testHostDir
	}
	defer func() {
		kataHostSharedDir = kataHostSharedDirSaved
	}()

	m1Path := filepath.Join(testMountPath, "foo.txt")
	f1, err := os.Create(m1Path)
	assert.NoError(err)
	defer f1.Close()

	m2Path := filepath.Join(testMountPath, "bar.txt")
	f2, err := os.Create(m2Path)
	assert.NoError(err)
	defer f2.Close()

	// create sandbox for mounting into
	sandbox := &Sandbox{
		ctx:    context.Background(),
		id:     "foobar",
		config: &SandboxConfig{
			SandboxBindMounts: []string{m1Path, m2Path},
		},
	}

	fsShare, err := NewFilesystemShare(sandbox)
	assert.Nil(err)
	sandbox.fsShare = fsShare

	// make the shared directory for our test:
	dir := kataHostSharedDir()
	err = os.MkdirAll(path.Join(dir, sandbox.id), 0777)
	assert.Nil(err)
	defer os.RemoveAll(dir)

	// Test the prepare function. We expect it to succeed
	err = sandbox.fsShare.Prepare(sandbox.ctx)
	assert.NoError(err)

	// Test the Cleanup function. We expect it to succeed for the mount to be removed.
	err = sandbox.fsShare.Cleanup(sandbox.ctx)
	assert.NoError(err)

	// After successful Cleanup, verify there are not any mounts left behind.
	stat := syscall.Stat_t{}
	mount1CheckPath := filepath.Join(getMountPath(sandbox.id), sandboxMountsDir, filepath.Base(m1Path))
	err = syscall.Stat(mount1CheckPath, &stat)
	assert.Error(err)
	assert.True(os.IsNotExist(err))

	mount2CheckPath := filepath.Join(getMountPath(sandbox.id), sandboxMountsDir, filepath.Base(m2Path))
	err = syscall.Stat(mount2CheckPath, &stat)
	assert.Error(err)
	assert.True(os.IsNotExist(err))

	// Verify that Prepare is idempotent.
	err = sandbox.fsShare.Prepare(sandbox.ctx)
	assert.NoError(err)
	err = sandbox.fsShare.Prepare(sandbox.ctx)
	assert.NoError(err)

	// Verify that Cleanup is idempotent.
	err = sandbox.fsShare.Cleanup(sandbox.ctx)
	assert.NoError(err)
	err = sandbox.fsShare.Cleanup(sandbox.ctx)
	assert.NoError(err)
}
@@ -63,16 +63,12 @@ const (
 	// path to vfio devices
 	vfioPath = "/dev/vfio/"

-	sandboxMountsDir = "sandbox-mounts"
-
 	NydusRootFSType = "fuse.nydus-overlayfs"

 	// enable debug console
 	kernelParamDebugConsole           = "agent.debug_console"
 	kernelParamDebugConsoleVPort      = "agent.debug_console_vport"
 	kernelParamDebugConsoleVPortValue = "1026"

-	// Restricted permission for shared directory managed by virtiofs
-	sharedDirMode = os.FileMode(0700) | os.ModeDir
 )

 var (
@@ -387,79 +383,6 @@ func (k *kataAgent) internalConfigure(ctx context.Context, h Hypervisor, id stri
 	return nil
 }

-func (k *kataAgent) setupSandboxBindMounts(ctx context.Context, sandbox *Sandbox) (err error) {
-	span, ctx := katatrace.Trace(ctx, k.Logger(), "setupSandboxBindMounts", kataAgentTracingTags)
-	defer span.End()
-
-	if len(sandbox.config.SandboxBindMounts) == 0 {
-		return nil
-	}
-
-	// Create subdirectory in host shared path for sandbox mounts
-	sandboxMountDir := filepath.Join(getMountPath(sandbox.id), sandboxMountsDir)
-	sandboxShareDir := filepath.Join(GetSharePath(sandbox.id), sandboxMountsDir)
-	if err := os.MkdirAll(sandboxMountDir, DirMode); err != nil {
-		return fmt.Errorf("Creating sandbox shared mount directory: %v: %w", sandboxMountDir, err)
-	}
-	var mountedList []string
-	defer func() {
-		if err != nil {
-			for _, mnt := range mountedList {
-				if derr := syscall.Unmount(mnt, syscall.MNT_DETACH|UmountNoFollow); derr != nil {
-					k.Logger().WithError(derr).Errorf("Cleanup: couldn't unmount %s", mnt)
-				}
-			}
-			if derr := os.RemoveAll(sandboxMountDir); derr != nil {
-				k.Logger().WithError(derr).Errorf("Cleanup: failed to remove %s", sandboxMountDir)
-			}
-
-		}
-	}()
-
-	for _, m := range sandbox.config.SandboxBindMounts {
-		mountDest := filepath.Join(sandboxMountDir, filepath.Base(m))
-		// bind-mount each sandbox mount that's defined into the sandbox mounts dir
-		if err := bindMount(ctx, m, mountDest, true, "private"); err != nil {
-			return fmt.Errorf("Mounting sandbox directory: %v to %v: %w", m, mountDest, err)
-		}
-		mountedList = append(mountedList, mountDest)
-
-		mountDest = filepath.Join(sandboxShareDir, filepath.Base(m))
-		if err := remountRo(ctx, mountDest); err != nil {
-			return fmt.Errorf("remount sandbox directory: %v to %v: %w", m, mountDest, err)
-		}
-
-	}
-
-	return nil
-}
-
-func (k *kataAgent) cleanupSandboxBindMounts(sandbox *Sandbox) error {
-	if sandbox.config == nil || len(sandbox.config.SandboxBindMounts) == 0 {
-		return nil
-	}
-
-	var retErr error
-	bindmountShareDir := filepath.Join(getMountPath(sandbox.id), sandboxMountsDir)
-	for _, m := range sandbox.config.SandboxBindMounts {
-		mountPath := filepath.Join(bindmountShareDir, filepath.Base(m))
-		if err := syscall.Unmount(mountPath, syscall.MNT_DETACH|UmountNoFollow); err != nil {
-			if retErr == nil {
-				retErr = err
-			}
-			k.Logger().WithError(err).Errorf("Failed to unmount sandbox bindmount: %v", mountPath)
-		}
-	}
-	if err := os.RemoveAll(bindmountShareDir); err != nil {
-		if retErr == nil {
-			retErr = err
-		}
-		k.Logger().WithError(err).Errorf("Failed to remove sandbox bindmount directory: %s", bindmountShareDir)
-	}
-
-	return retErr
-}
-
 func (k *kataAgent) configure(ctx context.Context, h Hypervisor, id, sharePath string, config KataAgentConfig) error {
 	span, ctx := katatrace.Trace(ctx, k.Logger(), "configure", kataAgentTracingTags)
 	defer span.End()
@@ -509,47 +432,10 @@ func (k *kataAgent) configureFromGrpc(ctx context.Context, h Hypervisor, id stri
 	return k.internalConfigure(ctx, h, id, config)
 }

-func (k *kataAgent) setupSharedPath(ctx context.Context, sandbox *Sandbox) (err error) {
-	span, ctx := katatrace.Trace(ctx, k.Logger(), "setupSharedPath", kataAgentTracingTags)
-	defer span.End()
-
-	// create shared path structure
-	sharePath := GetSharePath(sandbox.id)
-	mountPath := getMountPath(sandbox.id)
-	if err := os.MkdirAll(sharePath, sharedDirMode); err != nil {
-		return err
-	}
-	if err := os.MkdirAll(mountPath, DirMode); err != nil {
-		return err
-	}
-
-	// slave mount so that future mountpoints under mountPath are shown in sharePath as well
-	if err := bindMount(ctx, mountPath, sharePath, true, "slave"); err != nil {
-		return err
-	}
-	defer func() {
-		if err != nil {
-			if umountErr := syscall.Unmount(sharePath, syscall.MNT_DETACH|UmountNoFollow); umountErr != nil {
-				k.Logger().WithError(umountErr).Errorf("failed to unmount vm share path %s", sharePath)
-			}
-		}
-	}()
-
-	// Setup sandbox bindmounts, if specified:
-	if err = k.setupSandboxBindMounts(ctx, sandbox); err != nil {
-		return err
-	}
-
-	return nil
-}
-
 func (k *kataAgent) createSandbox(ctx context.Context, sandbox *Sandbox) error {
 	span, ctx := katatrace.Trace(ctx, k.Logger(), "createSandbox", kataAgentTracingTags)
 	defer span.End()

-	if err := k.setupSharedPath(ctx, sandbox); err != nil {
-		return err
-	}
 	return k.configure(ctx, sandbox.hypervisor, sandbox.id, GetSharePath(sandbox.id), sandbox.config.AgentConfig)
 }
@@ -2388,26 +2274,7 @@ func (k *kataAgent) markDead(ctx context.Context) {
 	k.disconnect(ctx)
 }

-func (k *kataAgent) cleanup(ctx context.Context, s *Sandbox) {
-	if err := k.cleanupSandboxBindMounts(s); err != nil {
-		k.Logger().WithError(err).Errorf("failed to Cleanup sandbox bindmounts")
-	}
-
-	// Unmount shared path
-	path := GetSharePath(s.id)
-	k.Logger().WithField("path", path).Infof("Cleanup agent")
-	if err := syscall.Unmount(path, syscall.MNT_DETACH|UmountNoFollow); err != nil {
-		k.Logger().WithError(err).Errorf("failed to unmount vm share path %s", path)
-	}
-
-	// Unmount mount path
-	path = getMountPath(s.id)
-	if err := bindUnmountAllRootfs(ctx, path, s); err != nil {
-		k.Logger().WithError(err).Errorf("failed to unmount vm mount path %s", path)
-	}
-	if err := os.RemoveAll(getSandboxPath(s.id)); err != nil {
-		k.Logger().WithError(err).Errorf("failed to Cleanup vm path %s", getSandboxPath(s.id))
-	}
+func (k *kataAgent) cleanup(ctx context.Context) {
 }

 func (k *kataAgent) save() persistapi.AgentState {
@@ -822,6 +822,10 @@ func TestAgentCreateContainer(t *testing.T) {
 		hypervisor: &mockHypervisor{},
 	}

+	fsShare, err := NewFilesystemShare(sandbox)
+	assert.Nil(err)
+	sandbox.fsShare = fsShare
+
 	store, err := persist.GetDriver()
 	assert.NoError(err)
 	assert.NotNil(store)
@@ -983,7 +987,7 @@ func TestKataCleanupSandbox(t *testing.T) {
 	assert.Nil(err)

 	k := &kataAgent{ctx: context.Background()}
-	k.cleanup(context.Background(), &s)
+	k.cleanup(context.Background())

 	_, err = os.Stat(dir)
 	assert.False(os.IsExist(err))
@@ -1119,119 +1123,6 @@ func TestKataAgentDirs(t *testing.T) {
 	assert.Equal(rafsMountPath(cid), expected)
 }

-func TestSandboxBindMount(t *testing.T) {
-	if os.Getuid() != 0 {
-		t.Skip("Test disabled as requires root user")
-	}
-
-	assert := assert.New(t)
-	// create temporary files to mount:
-	testMountPath, err := os.MkdirTemp("", "sandbox-test")
-	assert.NoError(err)
-	defer os.RemoveAll(testMountPath)
-
-	// create a new shared directory for our test:
-	kataHostSharedDirSaved := kataHostSharedDir
-	testHostDir, err := os.MkdirTemp("", "kata-Cleanup")
-	assert.NoError(err)
-	kataHostSharedDir = func() string {
-		return testHostDir
-	}
-	defer func() {
-		kataHostSharedDir = kataHostSharedDirSaved
-	}()
-
-	m1Path := filepath.Join(testMountPath, "foo.txt")
-	f1, err := os.Create(m1Path)
-	assert.NoError(err)
-	defer f1.Close()
-
-	m2Path := filepath.Join(testMountPath, "bar.txt")
-	f2, err := os.Create(m2Path)
-	assert.NoError(err)
-	defer f2.Close()
-
-	// create sandbox for mounting into
-	sandbox := &Sandbox{
-		ctx:    context.Background(),
-		id:     "foobar",
-		config: &SandboxConfig{
-			SandboxBindMounts: []string{m1Path, m2Path},
-		},
-	}
-	k := &kataAgent{ctx: context.Background()}
-
-	// make the shared directory for our test:
-	dir := kataHostSharedDir()
-	err = os.MkdirAll(path.Join(dir, sandbox.id), 0777)
-	assert.Nil(err)
-	defer os.RemoveAll(dir)
-
-	sharePath := GetSharePath(sandbox.id)
-	mountPath := getMountPath(sandbox.id)
-
-	err = os.MkdirAll(sharePath, DirMode)
-	assert.Nil(err)
-	err = os.MkdirAll(mountPath, DirMode)
-	assert.Nil(err)
-
-	// setup the expeted slave mount:
-	err = bindMount(sandbox.ctx, mountPath, sharePath, true, "slave")
-	assert.Nil(err)
-	defer syscall.Unmount(sharePath, syscall.MNT_DETACH|UmountNoFollow)
-
-	// Test the function. We expect it to succeed and for the mount to exist
-	err = k.setupSandboxBindMounts(context.Background(), sandbox)
-	assert.NoError(err)
-
-	// Test the Cleanup function. We expect it to succeed for the mount to be removed.
-	err = k.cleanupSandboxBindMounts(sandbox)
-	assert.NoError(err)
-
-	// After successful Cleanup, verify there are not any mounts left behind.
-	stat := syscall.Stat_t{}
-	mount1CheckPath := filepath.Join(getMountPath(sandbox.id), sandboxMountsDir, filepath.Base(m1Path))
-	err = syscall.Stat(mount1CheckPath, &stat)
-	assert.Error(err)
-	assert.True(os.IsNotExist(err))
-
-	mount2CheckPath := filepath.Join(getMountPath(sandbox.id), sandboxMountsDir, filepath.Base(m2Path))
-	err = syscall.Stat(mount2CheckPath, &stat)
-	assert.Error(err)
-	assert.True(os.IsNotExist(err))
-
-	// Now, let's setup the Cleanup to fail. Setup the sandbox bind mount twice, which will result in
-	// extra mounts being present that the sandbox description doesn't account for (ie, duplicate mounts).
-	// We expect Cleanup to fail on the first time, since it cannot remove the sandbox-bindmount directory because
-	// there are leftover mounts. If we run it a second time, however, it should succeed since it'll remove the
-	// second set of mounts:
-	err = k.setupSandboxBindMounts(context.Background(), sandbox)
-	assert.NoError(err)
-	err = k.setupSandboxBindMounts(context.Background(), sandbox)
-	assert.NoError(err)
-	// Test the Cleanup function. We expect it to succeed for the mount to be removed.
-	err = k.cleanupSandboxBindMounts(sandbox)
-	assert.Error(err)
-	err = k.cleanupSandboxBindMounts(sandbox)
-	assert.NoError(err)
-
-	//
-	// Now, let's setup the sandbox bindmount to fail, and verify that no mounts are left behind
-	//
-	sandbox.config.SandboxBindMounts = append(sandbox.config.SandboxBindMounts, "oh-nos")
-	err = k.setupSandboxBindMounts(context.Background(), sandbox)
-	assert.Error(err)
-	// Verify there aren't any mounts left behind
-	stat = syscall.Stat_t{}
-	err = syscall.Stat(mount1CheckPath, &stat)
-	assert.Error(err)
-	assert.True(os.IsNotExist(err))
-	err = syscall.Stat(mount2CheckPath, &stat)
-	assert.Error(err)
-	assert.True(os.IsNotExist(err))
-
-}
-
 func TestHandleHugepages(t *testing.T) {
 	if os.Getuid() != 0 {
 		t.Skip("Test disabled as requires root user")
@@ -224,7 +224,7 @@ func (n *mockAgent) addSwap(ctx context.Context, PCIPath vcTypes.PciPath) error
 func (n *mockAgent) markDead(ctx context.Context) {
 }

-func (n *mockAgent) cleanup(ctx context.Context, s *Sandbox) {
+func (n *mockAgent) cleanup(ctx context.Context) {
 }

 // save is the Noop agent state saver. It does nothing.
@@ -71,6 +71,11 @@ const (
 	// /sys/fs/cgroup/memory/kata_overhead/$CGPATH where $CGPATH is
 	// defined by the orchestrator.
 	resCtrlKataOverheadID = "/kata_overhead/"
+
+	sandboxMountsDir = "sandbox-mounts"
+
+	// Restricted permission for shared directory managed by virtiofs
+	sharedDirMode = os.FileMode(0700) | os.ModeDir
 )

 var (
@@ -191,6 +196,7 @@ type Sandbox struct {
 	hypervisor Hypervisor
 	agent      agent
 	store      persistapi.PersistDriver
+	fsShare    FilesystemSharer

 	swapDevices []*config.BlockDrive
 	volumes     []types.Volume
@@ -490,7 +496,13 @@ func createSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Fac
 		return s, nil
 	}

-	// Below code path is called only during create, because of earlier Check.
+	// The code below only gets called when initially creating a sandbox, not when restoring or
+	// re-creating it. The above check for the sandbox state enforces that.
+
+	if err := s.fsShare.Prepare(ctx); err != nil {
+		return nil, err
+	}
+
 	if err := s.agent.createSandbox(ctx, s); err != nil {
 		return nil, err
 	}
@@ -544,6 +556,12 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factor
 		swapDevices: []*config.BlockDrive{},
 	}

+	fsShare, err := NewFilesystemShare(s)
+	if err != nil {
+		return nil, err
+	}
+	s.fsShare = fsShare
+
 	if s.store, err = persist.GetDriver(); err != nil || s.store == nil {
 		return nil, fmt.Errorf("failed to get fs persist driver: %v", err)
 	}
@@ -791,7 +809,9 @@ func (s *Sandbox) Delete(ctx context.Context) error {
 		s.Logger().WithError(err).Error("failed to Cleanup hypervisor")
 	}

-	s.agent.cleanup(ctx, s)
+	if err := s.fsShare.Cleanup(ctx); err != nil {
+		s.Logger().WithError(err).Error("failed to cleanup share files")
+	}

 	return s.store.Destroy(s.id)
 }