mirror of https://github.com/kata-containers/kata-containers.git
shm: handle shm mount backed by empty-dir memory volumes
[cherry picked from runtime commit 3c4fe035e8041b44e1f3e06d5247938be9a1db15]

Check whether the shm mount is backed by an empty-dir memory-based volume. If so, let the logic that handles ephemeral volumes take care of this mount, so that the container's shm mount is backed by a tmpfs mount inside the VM.

Fixes: #323

Signed-off-by: Archana Shinde <archana.m.shinde@intel.com>
Signed-off-by: Peng Tao <bergwolf@hyper.sh>
parent: eed66021da
commit: a976548fb2
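Before the diff itself, here is a minimal, self-contained sketch of the decision this commit adds to handleShm. The Mount struct, the constant's value, and the guest path below are illustrative stand-ins, not the real virtcontainers definitions:

    package main

    import (
        "fmt"
        "path/filepath"
    )

    // Illustrative stand-ins for the real virtcontainers types and constants.
    type Mount struct {
        Type        string
        Source      string
        Destination string
        Options     []string
    }

    const kataEphemeralDevType = "ephemeral" // assumed value of KataEphemeralDevType

    // handleShm, reduced to the decision this commit adds: a /dev/shm mount
    // backed by an ephemeral (empty-dir memory) volume is left untouched so
    // the ephemeral-storage path can back it with a tmpfs inside the guest;
    // otherwise it is rewritten to share the sandbox shm.
    func handleShm(mounts []Mount, shmSize uint64, guestSandboxDir string) {
        for idx, mnt := range mounts {
            if mnt.Destination != "/dev/shm" {
                continue
            }
            if mnt.Type == kataEphemeralDevType {
                continue // handleEphemeralStorage sets up /dev/shm later
            }
            if shmSize > 0 {
                mounts[idx].Type = "bind"
                mounts[idx].Options = []string{"rbind"}
                mounts[idx].Source = filepath.Join(guestSandboxDir, "shm")
            }
        }
    }

    func main() {
        mounts := []Mount{{Type: kataEphemeralDevType, Destination: "/dev/shm"}}
        handleShm(mounts, 8192, "/run/kata-containers/sandbox")
        fmt.Printf("%+v\n", mounts[0]) // still ephemeral: skipped as intended
    }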
@@ -1130,22 +1130,32 @@ func (k *kataAgent) constraintGRPCSpec(grpcSpec *grpc.Spec, passSeccomp bool) {
     grpcSpec.Linux.Devices = linuxDevices
 }
 
-func (k *kataAgent) handleShm(grpcSpec *grpc.Spec, sandbox *Sandbox) {
-    for idx, mnt := range grpcSpec.Mounts {
+func (k *kataAgent) handleShm(mounts []specs.Mount, sandbox *Sandbox) {
+    for idx, mnt := range mounts {
         if mnt.Destination != "/dev/shm" {
             continue
         }
 
+        // If /dev/shm for a container is backed by an ephemeral volume, skip
+        // bind-mounting it to the sandbox shm.
+        // A later call to handleEphemeralStorage should take care of setting up /dev/shm correctly.
+        if mnt.Type == KataEphemeralDevType {
+            continue
+        }
+
         // A container shm mount is shared with sandbox shm mount.
         if sandbox.shmSize > 0 {
-            grpcSpec.Mounts[idx].Type = "bind"
-            grpcSpec.Mounts[idx].Options = []string{"rbind"}
-            grpcSpec.Mounts[idx].Source = filepath.Join(kataGuestSandboxDir(), shmDir)
+            mounts[idx].Type = "bind"
+            mounts[idx].Options = []string{"rbind"}
+            mounts[idx].Source = filepath.Join(kataGuestSandboxDir(), shmDir)
             k.Logger().WithField("shm-size", sandbox.shmSize).Info("Using sandbox shm")
         } else {
             // This should typically not happen, as a sandbox shm mount is always set up by the
             // upper stack.
             sizeOption := fmt.Sprintf("size=%d", DefaultShmSize)
-            grpcSpec.Mounts[idx].Type = "tmpfs"
-            grpcSpec.Mounts[idx].Source = "shm"
-            grpcSpec.Mounts[idx].Options = []string{"noexec", "nosuid", "nodev", "mode=1777", sizeOption}
+            mounts[idx].Type = "tmpfs"
+            mounts[idx].Source = "shm"
+            mounts[idx].Options = []string{"noexec", "nosuid", "nodev", "mode=1777", sizeOption}
             k.Logger().WithField("shm-size", sizeOption).Info("Setting up a separate shm for container")
         }
     }
 }
@@ -1382,6 +1392,8 @@ func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process,
         return nil, err
     }
 
+    k.handleShm(ociSpec.Mounts, sandbox)
+
     epheStorages := k.handleEphemeralStorage(ociSpec.Mounts)
     ctrStorages = append(ctrStorages, epheStorages...)
 
@@ -1432,8 +1444,6 @@ func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process,
     // irrelevant information to the agent.
     k.constraintGRPCSpec(grpcSpec, passSeccomp)
 
-    k.handleShm(grpcSpec, sandbox)
-
     req := &grpc.CreateContainerRequest{
         ContainerId:  c.id,
         ExecId:       c.id,
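Note the call site moving: handleShm now edits the OCI mounts before they are translated into the gRPC spec, and before handleEphemeralStorage collects ephemeral volumes from the same slice. A reduced, self-contained sketch of why that ordering works (stub types; both functions here only model the relevant step):

    package main

    import "fmt"

    // Stub mount type; only the fields needed to show the ordering.
    type mount struct{ Type, Destination string }

    const ephemeralType = "ephemeral" // assumed value of KataEphemeralDevType

    // handleShm (reduced): rewrites non-ephemeral /dev/shm mounts to bind mounts.
    func handleShm(mounts []mount) {
        for i, m := range mounts {
            if m.Destination == "/dev/shm" && m.Type != ephemeralType {
                mounts[i].Type = "bind"
            }
        }
    }

    // handleEphemeralStorage (reduced): collects mounts still typed ephemeral.
    func handleEphemeralStorage(mounts []mount) []mount {
        var storages []mount
        for _, m := range mounts {
            if m.Type == ephemeralType {
                storages = append(storages, m)
            }
        }
        return storages
    }

    func main() {
        ociMounts := []mount{{Type: ephemeralType, Destination: "/dev/shm"}}
        handleShm(ociMounts) // runs first and leaves the ephemeral shm alone
        fmt.Println(len(handleEphemeralStorage(ociMounts))) // 1: still collected
    }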
@@ -663,33 +663,50 @@ func TestHandleShm(t *testing.T) {
         shmSize: 8192,
     }
 
-    g := &pb.Spec{
-        Hooks: &pb.Hooks{},
-        Mounts: []pb.Mount{
-            {Destination: "/dev/shm"},
-        },
+    var ociMounts []specs.Mount
+
+    mount := specs.Mount{
+        Type:        "bind",
+        Destination: "/dev/shm",
     }
 
-    k.handleShm(g, sandbox)
+    ociMounts = append(ociMounts, mount)
+    k.handleShm(ociMounts, sandbox)
 
-    assert.Len(g.Mounts, 1)
-    assert.NotEmpty(g.Mounts[0].Destination)
-    assert.Equal(g.Mounts[0].Destination, "/dev/shm")
-    assert.Equal(g.Mounts[0].Type, "bind")
-    assert.NotEmpty(g.Mounts[0].Source, filepath.Join(kataGuestSharedDir(), shmDir))
-    assert.Equal(g.Mounts[0].Options, []string{"rbind"})
+    assert.Len(ociMounts, 1)
+    assert.NotEmpty(ociMounts[0].Destination)
+    assert.Equal(ociMounts[0].Destination, "/dev/shm")
+    assert.Equal(ociMounts[0].Type, "bind")
+    assert.NotEmpty(ociMounts[0].Source, filepath.Join(kataGuestSharedDir(), shmDir))
+    assert.Equal(ociMounts[0].Options, []string{"rbind"})
 
     sandbox.shmSize = 0
-    k.handleShm(g, sandbox)
+    k.handleShm(ociMounts, sandbox)
 
-    assert.Len(g.Mounts, 1)
-    assert.NotEmpty(g.Mounts[0].Destination)
-    assert.Equal(g.Mounts[0].Destination, "/dev/shm")
-    assert.Equal(g.Mounts[0].Type, "tmpfs")
-    assert.Equal(g.Mounts[0].Source, "shm")
+    assert.Len(ociMounts, 1)
+    assert.Equal(ociMounts[0].Destination, "/dev/shm")
+    assert.Equal(ociMounts[0].Type, "tmpfs")
+    assert.Equal(ociMounts[0].Source, "shm")
     sizeOption := fmt.Sprintf("size=%d", DefaultShmSize)
-    assert.Equal(g.Mounts[0].Options, []string{"noexec", "nosuid", "nodev", "mode=1777", sizeOption})
+    assert.Equal(ociMounts[0].Options, []string{"noexec", "nosuid", "nodev", "mode=1777", sizeOption})
+
+    // In case the type of mount is ephemeral, the container mount is not
+    // shared with the sandbox shm.
+    ociMounts[0].Type = KataEphemeralDevType
+    mountSource := "/tmp/mountPoint"
+    ociMounts[0].Source = mountSource
+    k.handleShm(ociMounts, sandbox)
+
+    assert.Len(ociMounts, 1)
+    assert.Equal(ociMounts[0].Type, KataEphemeralDevType)
+    assert.NotEmpty(ociMounts[0].Source, mountSource)
+
+    epheStorages := k.handleEphemeralStorage(ociMounts)
+    epheMountPoint := epheStorages[0].MountPoint
+    expected := filepath.Join(ephemeralPath(), filepath.Base(mountSource))
+    assert.Equal(epheMountPoint, expected,
+        "Ephemeral mount point didn't match: got %s, expecting %s", epheMountPoint, expected)
+
 }
 
 func testIsPidNamespacePresent(grpcSpec *pb.Spec) bool {
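The new tail of the test also pins down where the ephemeral path mounts such a volume in the guest: under ephemeralPath(), keyed by the base name of the host source. A standalone illustration of that derivation (the base directory below is an assumed placeholder for what ephemeralPath() returns, not its actual value):

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        // Assumed stand-in for ephemeralPath(): the guest directory that
        // ephemeral volumes are mounted under.
        ephemeralBase := "/run/kata-containers/sandbox/ephemeral"
        mountSource := "/tmp/mountPoint" // host-side source used by the test
        fmt.Println(filepath.Join(ephemeralBase, filepath.Base(mountSource)))
        // Prints: /run/kata-containers/sandbox/ephemeral/mountPoint
    }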