vc: change container rootfs to be a mount

The container rootfs and ordinary container mounts can then be
described by the same data structure and handled in the same way.

Fixes: #1566

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
Peng Tao 2019-04-18 02:36:52 -07:00
parent b218229589
commit 196661bc0d
19 changed files with 83 additions and 212 deletions
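
Before the per-file hunks, a minimal sketch of the resulting API, using the field names introduced below (the paths, IDs and the standalone program setup are illustrative, not part of the change): the dedicated vc.RootFs type goes away and the rootfs is described by the same vc.Mount structure used for ordinary container mounts, with Mounted recording whether the host side is already mounted.

package main

import (
    "fmt"

    vc "github.com/kata-containers/runtime/virtcontainers"
)

func main() {
    // Rootfs already mounted on the host, e.g. by the containerd shim
    // (the path is illustrative).
    mounted := vc.Mount{
        Destination: "/run/kata/bundle/rootfs",
        Mounted:     true,
    }

    // Rootfs backed by a block device that the runtime will hotplug later;
    // hotplugDrive() then records the device in BlockDeviceID and the
    // filesystem in Type (the device path is illustrative).
    blockBacked := vc.Mount{
        Source:  "/dev/mapper/example-snapshot",
        Type:    "ext4",
        Mounted: false,
    }

    cfg := vc.ContainerConfig{ID: "example", RootFs: mounted}
    fmt.Println(cfg.ID, blockBacked.Source)
}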


@ -128,7 +128,7 @@ func create(ctx context.Context, containerID, bundlePath, console, pidFilePath s
disableOutput := noNeedForOutput(detach, ociSpec.Process.Terminal)
//rootfs has been mounted by containerd shim
rootFs := vc.RootFs{Mounted: true}
rootFs := vc.Mount{Mounted: true}
var process vc.Process
switch containerType {


@ -10,12 +10,13 @@ package containerdshim
import (
"context"
"fmt"
"os"
"path/filepath"
"github.com/containerd/typeurl"
vc "github.com/kata-containers/runtime/virtcontainers"
"github.com/kata-containers/runtime/virtcontainers/pkg/oci"
"github.com/pkg/errors"
"os"
"path/filepath"
taskAPI "github.com/containerd/containerd/runtime/v2/task"
@ -31,7 +32,7 @@ import (
)
func create(ctx context.Context, s *service, r *taskAPI.CreateTaskRequest, netns string) (*container, error) {
rootFs := vc.RootFs{Mounted: s.mount}
rootFs := vc.Mount{Mounted: s.mount}
if len(r.Rootfs) == 1 {
m := r.Rootfs[0]
rootFs.Source = m.Source


@ -172,7 +172,7 @@ func SetEphemeralStorageType(ociSpec oci.CompatOCISpec) oci.CompatOCISpec {
}
// CreateSandbox create a sandbox container
func CreateSandbox(ctx context.Context, vci vc.VC, ociSpec oci.CompatOCISpec, runtimeConfig oci.RuntimeConfig, rootFs vc.RootFs,
func CreateSandbox(ctx context.Context, vci vc.VC, ociSpec oci.CompatOCISpec, runtimeConfig oci.RuntimeConfig, rootFs vc.Mount,
containerID, bundlePath, console string, disableOutput, systemdCgroup, builtIn bool) (vc.VCSandbox, vc.Process, error) {
span, ctx := Trace(ctx, "createSandbox")
defer span.Finish()
@ -237,7 +237,7 @@ func CreateSandbox(ctx context.Context, vci vc.VC, ociSpec oci.CompatOCISpec, ru
}
// CreateContainer create a container
func CreateContainer(ctx context.Context, vci vc.VC, sandbox vc.VCSandbox, ociSpec oci.CompatOCISpec, rootFs vc.RootFs, containerID, bundlePath, console string, disableOutput, builtIn bool) (vc.Process, error) {
func CreateContainer(ctx context.Context, vci vc.VC, sandbox vc.VCSandbox, ociSpec oci.CompatOCISpec, rootFs vc.Mount, containerID, bundlePath, console string, disableOutput, builtIn bool) (vc.Process, error) {
var c vc.VCContainer
span, ctx := Trace(ctx, "createContainer")


@ -306,7 +306,7 @@ func TestCreateSandboxConfigFail(t *testing.T) {
Quota: &quota,
}
rootFs := vc.RootFs{Mounted: true}
rootFs := vc.Mount{Mounted: true}
_, _, err = CreateSandbox(context.Background(), testingImpl, spec, runtimeConfig, rootFs, testContainerID, bundlePath, testConsole, true, true, false)
assert.Error(err)
@ -342,7 +342,7 @@ func TestCreateSandboxFail(t *testing.T) {
spec, err := readOCIConfigFile(ociConfigFile)
assert.NoError(err)
rootFs := vc.RootFs{Mounted: true}
rootFs := vc.Mount{Mounted: true}
_, _, err = CreateSandbox(context.Background(), testingImpl, spec, runtimeConfig, rootFs, testContainerID, bundlePath, testConsole, true, true, false)
assert.Error(err)
@ -381,7 +381,7 @@ func TestCreateContainerContainerConfigFail(t *testing.T) {
err = writeOCIConfigFile(spec, ociConfigFile)
assert.NoError(err)
rootFs := vc.RootFs{Mounted: true}
rootFs := vc.Mount{Mounted: true}
for _, disableOutput := range []bool{true, false} {
_, err = CreateContainer(context.Background(), testingImpl, nil, spec, rootFs, testContainerID, bundlePath, testConsole, disableOutput, false)
@ -424,7 +424,7 @@ func TestCreateContainerFail(t *testing.T) {
err = writeOCIConfigFile(spec, ociConfigFile)
assert.NoError(err)
rootFs := vc.RootFs{Mounted: true}
rootFs := vc.Mount{Mounted: true}
for _, disableOutput := range []bool{true, false} {
_, err = CreateContainer(context.Background(), testingImpl, nil, spec, rootFs, testContainerID, bundlePath, testConsole, disableOutput, false)
@ -474,7 +474,7 @@ func TestCreateContainer(t *testing.T) {
err = writeOCIConfigFile(spec, ociConfigFile)
assert.NoError(err)
rootFs := vc.RootFs{Mounted: true}
rootFs := vc.Mount{Mounted: true}
for _, disableOutput := range []bool{true, false} {
_, err = CreateContainer(context.Background(), testingImpl, nil, spec, rootFs, testContainerID, bundlePath, testConsole, disableOutput, false)


@ -601,7 +601,7 @@ func statusContainer(sandbox *Sandbox, containerID string) (ContainerStatus, err
State: container.state,
PID: container.process.Pid,
StartTime: container.process.StartTime,
RootFs: container.config.RootFs.Target,
RootFs: container.config.RootFs.Destination,
Annotations: container.config.Annotations,
}, nil
}


@ -62,7 +62,7 @@ func newTestSandboxConfigNoop() SandboxConfig {
// Define the container command and bundle.
container := ContainerConfig{
ID: containerID,
RootFs: RootFs{Target: filepath.Join(testDir, testBundle), Mounted: true},
RootFs: Mount{Destination: filepath.Join(testDir, testBundle), Mounted: true},
Cmd: newBasicTestCmd(),
Annotations: containerAnnotations,
}
@ -751,7 +751,7 @@ func newTestContainerConfigNoop(contID string) ContainerConfig {
// Define the container command and bundle.
container := ContainerConfig{
ID: contID,
RootFs: RootFs{Target: filepath.Join(testDir, testBundle), Mounted: true},
RootFs: Mount{Destination: filepath.Join(testDir, testBundle), Mounted: true},
Cmd: newBasicTestCmd(),
Annotations: containerAnnotations,
}


@ -209,7 +209,7 @@ type ContainerConfig struct {
ID string
// RootFs is the container workload image on the host.
RootFs RootFs
RootFs Mount
// ReadOnlyRootfs indicates if the rootfs should be mounted readonly
ReadonlyRootfs bool
@ -272,27 +272,13 @@ type ContainerDevice struct {
GID uint32
}
// RootFs describes the container's rootfs.
type RootFs struct {
// Source specifies the BlockDevice path
Source string
// Target specify where the rootfs is mounted if it has been mounted
Target string
// Type specifies the type of filesystem to mount.
Type string
// Options specifies zero or more fstab style mount options.
Options []string
// Mounted specifies whether the rootfs has be mounted or not
Mounted bool
}
// Container is composed of a set of containers and a runtime environment.
// A Container can be created, deleted, started, stopped, listed, entered, paused and restored.
type Container struct {
id string
sandboxID string
rootFs RootFs
rootFs Mount
config *ContainerConfig
@ -371,19 +357,6 @@ func (c *Container) SetPid(pid int) error {
return c.storeProcess()
}
func (c *Container) setStateFstype(fstype string) error {
c.state.Fstype = fstype
if !c.sandbox.supportNewStore() {
// experimental runtime use "persist.json" which doesn't need "state.json" anymore
if err := c.storeState(); err != nil {
return err
}
}
return nil
}
// GetAnnotations returns container's annotations
func (c *Container) GetAnnotations() map[string]string {
return c.config.Annotations
@ -411,10 +384,6 @@ func (c *Container) storeDevices() error {
return c.store.Store(store.DeviceIDs, c.devices)
}
func (c *Container) storeState() error {
return c.store.Store(store.State, c.state)
}
func (c *Container) loadMounts() ([]Mount, error) {
var mounts []Mount
if err := c.store.Load(store.Mounts, &mounts); err != nil {
@ -1183,6 +1152,14 @@ func (c *Container) resume() error {
}
func (c *Container) hotplugDrive() error {
if err := c.hotplugRootfsDrive(); err != nil {
return err
}
return nil
}
func (c *Container) hotplugRootfsDrive() error {
var dev device
var err error
@ -1192,7 +1169,7 @@ func (c *Container) hotplugDrive() error {
// there is no "rootfs" dir on block device backed rootfs
c.rootfsSuffix = ""
} else {
dev, err = getDeviceForPath(c.rootFs.Target)
dev, err = getDeviceForPath(c.rootFs.Destination)
}
if err == errMountPointNotFound {
@ -1221,7 +1198,7 @@ func (c *Container) hotplugDrive() error {
devicePath := c.rootFs.Source
fsType := c.rootFs.Type
if c.rootFs.Mounted {
if dev.mountPoint == c.rootFs.Target {
if dev.mountPoint == c.rootFs.Destination {
c.rootfsSuffix = ""
}
// If device mapper device, then fetch the full path of the device
@ -1229,6 +1206,7 @@ func (c *Container) hotplugDrive() error {
if err != nil {
return err
}
c.rootFs.Type = fsType
}
devicePath, err = filepath.EvalSymlinks(devicePath)
@ -1241,11 +1219,7 @@ func (c *Container) hotplugDrive() error {
"fs-type": fsType,
}).Info("Block device detected")
if err = c.plugDevice(devicePath); err != nil {
return err
}
return c.setStateFstype(fsType)
return c.plugDevice(devicePath)
}
func (c *Container) plugDevice(devicePath string) error {
@ -1266,7 +1240,7 @@ func (c *Container) plugDevice(devicePath string) error {
return fmt.Errorf("device manager failed to create rootfs device for %q: %v", devicePath, err)
}
c.state.BlockDeviceID = b.DeviceID()
c.rootFs.BlockDeviceID = b.DeviceID()
// attach rootfs device
if err := c.sandbox.devManager.AttachDevice(b.DeviceID(), c.sandbox); err != nil {
@ -1280,16 +1254,11 @@ func (c *Container) plugDevice(devicePath string) error {
return nil
}
// isDriveUsed checks if a drive has been used for container rootfs
func (c *Container) isDriveUsed() bool {
return !(c.state.Fstype == "")
}
func (c *Container) removeDrive() (err error) {
if c.isDriveUsed() {
if c.rootFs.BlockDeviceID != "" {
c.Logger().Info("unplugging block device")
devID := c.state.BlockDeviceID
devID := c.rootFs.BlockDeviceID
err := c.sandbox.devManager.DetachDevice(devID, c.sandbox)
if err != nil && err != manager.ErrDeviceNotAttached {
return err
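
For orientation in the hunks above, a hedged sketch of the bookkeeping that replaces state.Fstype and state.BlockDeviceID (the driveInUse helper and the values are invented for illustration): hotplugRootfsDrive() records the attached device and filesystem directly on the rootfs Mount, and removeDrive() keys off BlockDeviceID.

package main

import (
    "fmt"

    vc "github.com/kata-containers/runtime/virtcontainers"
)

// driveInUse plays the role of the removed isDriveUsed() helper: a drive backs
// the rootfs exactly when hotplug has recorded a device ID on the Mount.
func driveInUse(rootFs vc.Mount) bool {
    return rootFs.BlockDeviceID != ""
}

func main() {
    // After a successful hotplug the runtime has filled these in
    // (values are illustrative).
    hotplugged := vc.Mount{BlockDeviceID: "drive-0", Type: "ext4"}
    shared := vc.Mount{Mounted: true}

    fmt.Println(driveInUse(hotplugged)) // true: removeDrive() will detach it
    fmt.Println(driveInUse(shared))     // false: nothing to detach
}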


@ -106,7 +106,7 @@ func TestContainerRemoveDrive(t *testing.T) {
id: "testContainer",
}
container.state.Fstype = ""
container.rootFs.Type = ""
err = container.removeDrive()
// hotplugRemoveDevice for hypervisor should not be called.
@ -131,8 +131,8 @@ func TestContainerRemoveDrive(t *testing.T) {
err = sandbox.storeSandboxDevices()
assert.Nil(t, err)
container.state.Fstype = "xfs"
container.state.BlockDeviceID = device.DeviceID()
container.rootFs.Type = "xfs"
container.rootFs.BlockDeviceID = device.DeviceID()
err = container.removeDrive()
assert.Nil(t, err, "remove drive should succeed")
}
@ -245,7 +245,7 @@ func TestContainerAddDriveDir(t *testing.T) {
container := Container{
sandbox: sandbox,
id: contID,
rootFs: RootFs{Target: fakeRootfs, Mounted: true},
rootFs: Mount{Destination: fakeRootfs, Mounted: true},
}
containerStore, err := store.NewVCContainerStore(sandbox.ctx, sandbox.id, container.id)
@ -272,14 +272,14 @@ func TestContainerAddDriveDir(t *testing.T) {
checkStorageDriver = savedFunc
}()
container.state.Fstype = ""
container.rootFs.Type = "xfs"
err = container.hotplugDrive()
if err != nil {
t.Fatalf("Error with hotplugDrive :%v", err)
}
if container.state.Fstype == "" {
if container.rootFs.Type == "" {
t.Fatal()
}
}
@ -315,7 +315,7 @@ func TestContainerRootfsPath(t *testing.T) {
container := Container{
id: "rootfstestcontainerid",
sandbox: sandbox,
rootFs: RootFs{Target: fakeRootfs, Mounted: true},
rootFs: Mount{Destination: fakeRootfs, Mounted: true},
rootfsSuffix: "rootfs",
}
cvcstore, err := store.NewVCContainerStore(context.Background(),
@ -328,7 +328,7 @@ func TestContainerRootfsPath(t *testing.T) {
assert.Empty(t, container.rootfsSuffix)
// Reset the value to test the other case
container.rootFs = RootFs{Target: fakeRootfs + "/rootfs", Mounted: true}
container.rootFs = Mount{Destination: fakeRootfs + "/rootfs", Mounted: true}
container.rootfsSuffix = "rootfs"
container.hotplugDrive()


@ -14,7 +14,7 @@ import (
"github.com/kata-containers/runtime/virtcontainers/types"
)
var containerRootfs = vc.RootFs{Target: "/var/lib/container/bundle/", Mounted: true}
var containerRootfs = vc.Mount{Destination: "/var/lib/container/bundle/", Mounted: true}
// This example creates and starts a single container sandbox,
// using qemu as the hypervisor and kata as the VM agent.


@ -944,7 +944,7 @@ func (k *kataAgent) rollbackFailingContainerCreation(c *Container) {
}
func (k *kataAgent) buildContainerRootfs(sandbox *Sandbox, c *Container, rootPathParent string) (*grpc.Storage, error) {
if c.state.Fstype != "" && c.state.BlockDeviceID != "" {
if c.rootFs.BlockDeviceID != "" {
// The rootfs storage volume represents the container rootfs
// mount point inside the guest.
// It can be a block based device (when using block based container
@ -953,10 +953,10 @@ func (k *kataAgent) buildContainerRootfs(sandbox *Sandbox, c *Container, rootPat
rootfs := &grpc.Storage{}
// This is a block based device rootfs.
device := sandbox.devManager.GetDeviceByID(c.state.BlockDeviceID)
device := sandbox.devManager.GetDeviceByID(c.rootFs.BlockDeviceID)
if device == nil {
k.Logger().WithField("device", c.state.BlockDeviceID).Error("failed to find device by id")
return nil, fmt.Errorf("failed to find device by id %q", c.state.BlockDeviceID)
k.Logger().WithField("device", c.rootFs.BlockDeviceID).Error("failed to find device by id")
return nil, fmt.Errorf("failed to find device by id %q", c.rootFs.BlockDeviceID)
}
blockDrive, ok := device.GetDeviceInfo().(*config.BlockDrive)
@ -976,10 +976,11 @@ func (k *kataAgent) buildContainerRootfs(sandbox *Sandbox, c *Container, rootPat
rootfs.Source = blockDrive.SCSIAddr
}
rootfs.MountPoint = rootPathParent
rootfs.Fstype = c.state.Fstype
rootfs.Fstype = c.rootFs.Type
rootfs.Options = c.rootFs.Options
if c.state.Fstype == "xfs" {
rootfs.Options = []string{"nouuid"}
if rootfs.Fstype == "xfs" {
rootfs.Options = append(rootfs.Options, "nouuid")
}
return rootfs, nil
@ -993,7 +994,7 @@ func (k *kataAgent) buildContainerRootfs(sandbox *Sandbox, c *Container, rootPat
// (kataGuestSharedDir) is already mounted in the
// guest. We only need to mount the rootfs from
// the host and it will show up in the guest.
if err := bindMountContainerRootfs(k.ctx, kataHostSharedDir, sandbox.id, c.id, c.rootFs.Target, false); err != nil {
if err := bindMountContainerRootfs(k.ctx, kataHostSharedDir, sandbox.id, c.id, c.rootFs.Destination, false); err != nil {
return nil, err
}


@ -735,9 +735,7 @@ func TestAgentCreateContainer(t *testing.T) {
id: "barfoo",
sandboxID: "foobar",
sandbox: sandbox,
state: types.ContainerState{
Fstype: "xfs",
},
state: types.ContainerState{},
config: &ContainerConfig{
Annotations: map[string]string{},
},


@ -304,7 +304,10 @@ func bindMountContainerRootfs(ctx context.Context, sharedDir, sandboxID, cID, cR
// Mount describes a container mount.
type Mount struct {
Source string
// Mount source
Source string
// Mount destination in the guest
Destination string
// Type specifies the type of filesystem to mount.
@ -316,13 +319,16 @@ type Mount struct {
// HostPath used to store host side bind mount path
HostPath string
// ReadOnly specifies if the mount should be read only or not
ReadOnly bool
// BlockDeviceID represents block device that is attached to the
// VM in case this mount is a block device file or a directory
// backed by a block device.
BlockDeviceID string
// ReadOnly specifies if the mount should be read only or not
ReadOnly bool
// Mounted specifies if the target has been mounted on the host
Mounted bool
}
func bindUnmountContainerRootfs(ctx context.Context, sharedDir, sandboxID, cID string) error {
@ -341,7 +347,7 @@ func bindUnmountAllRootfs(ctx context.Context, sharedDir string, sandbox *Sandbo
for _, c := range sandbox.containers {
c.unmountHostMounts()
if c.state.Fstype == "" {
if c.rootFs.Type == "" {
// Need to check for error returned by this call.
// See: https://github.com/containers/virtcontainers/issues/295
bindUnmountContainerRootfs(c.ctx, sharedDir, sandbox.id, c.id)
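
Pulling the agent and mount hunks together, a hedged sketch of how a rootfs can now be classified from the Mount alone (the describeRootfs helper and its labels are invented): the agent builds block-based guest storage when BlockDeviceID is set, while host-side cleanup only bind-unmounts from the shared directory when Type is empty.

package main

import (
    "fmt"

    vc "github.com/kata-containers/runtime/virtcontainers"
)

// describeRootfs is illustrative only; it condenses the checks done by
// buildContainerRootfs(), hotplugDrive() and bindUnmountAllRootfs().
func describeRootfs(m vc.Mount) string {
    switch {
    case m.BlockDeviceID != "":
        // Exposed to the guest as a block device; the guest mount uses
        // m.Type and m.Options ("nouuid" is appended for xfs).
        return "block-backed"
    case m.Mounted && m.Type == "":
        // Plain directory rootfs: bind-mounted under the host shared
        // directory, and bind-unmounted from there on cleanup.
        return "shared-dir"
    default:
        // Not mounted on the host: Source and Type are handed to the
        // hotplug path as the device path and filesystem type.
        return "device-to-hotplug"
    }
}

func main() {
    fmt.Println(describeRootfs(vc.Mount{Mounted: true}))
    fmt.Println(describeRootfs(vc.Mount{Source: "/dev/vdb", Type: "ext4"})) // path illustrative
}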


@ -31,9 +31,9 @@ func (s *Sandbox) dumpState(ss *persistapi.SandboxState, cs map[string]persistap
state = v
}
state.State = string(cont.state.State)
state.Rootfs = persistapi.RootfsState{
BlockDeviceID: cont.state.BlockDeviceID,
FsType: cont.state.Fstype,
state.Rootfs = persistapi.Mount{
BlockDeviceID: cont.rootFs.BlockDeviceID,
Type: cont.rootFs.Type,
}
state.CgroupPath = cont.state.CgroupPath
cs[id] = state
@ -152,10 +152,8 @@ func (c *Container) Restore() error {
}
c.state = types.ContainerState{
State: types.StateString(cs[c.id].State),
BlockDeviceID: cs[c.id].Rootfs.BlockDeviceID,
Fstype: cs[c.id].Rootfs.FsType,
CgroupPath: cs[c.id].CgroupPath,
State: types.StateString(cs[c.id].State),
CgroupPath: cs[c.id].CgroupPath,
}
return nil
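
To make the persistence change concrete, a sketch of roughly what is stored for a block-backed container after this change (the values are illustrative and the persistapi import path is assumed from the package name):

package main

import (
    "fmt"

    persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
)

func main() {
    // The rootfs block device ID and filesystem type now live in the
    // persisted Mount instead of the old RootfsState / ContainerState fields.
    cs := persistapi.ContainerState{
        State: "running",
        Rootfs: persistapi.Mount{
            BlockDeviceID: "drive-0", // illustrative device ID
            Type:          "xfs",
        },
        CgroupPath: "/kata/sandbox-id", // illustrative
    }
    fmt.Printf("%+v\n", cs)
}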


@ -47,23 +47,16 @@ type Mount struct {
// HostPath used to store host side bind mount path
HostPath string
// ReadOnly specifies if the mount should be read only or not
ReadOnly bool
// BlockDeviceID represents block device that is attached to the
// VM in case this mount is a block device file or a directory
// backed by a block device.
BlockDeviceID string
}
// RootfsState saves state of container rootfs
type RootfsState struct {
// BlockDeviceID represents container rootfs block device ID
// when backed by devicemapper
BlockDeviceID string
// ReadOnly specifies if the mount should be read only or not
ReadOnly bool
// RootFStype is file system of the rootfs incase it is block device
FsType string
// Mounted specifies if the target has been mounted on the host
Mounted bool
}
// Process gathers data related to a container process.
@ -90,7 +83,7 @@ type ContainerState struct {
State string
// Rootfs contains information of container rootfs
Rootfs RootfsState
Rootfs Mount
// CgroupPath is the cgroup hierarchy where sandbox's processes
// including the hypervisor are placed.


@ -183,6 +183,7 @@ func newMount(m spec.Mount) vc.Mount {
Destination: m.Destination,
Type: m.Type,
Options: m.Options,
Mounted: true, // All OCI volumes are mounted on the host ATM.
}
}
@ -524,12 +525,12 @@ func ContainerConfig(ocispec CompatOCISpec, bundlePath, cid, console string, det
return vc.ContainerConfig{}, err
}
rootfs := vc.RootFs{Target: ocispec.Root.Path, Mounted: true}
if !filepath.IsAbs(rootfs.Target) {
rootfs.Target = filepath.Join(bundlePath, ocispec.Root.Path)
rootfs := vc.Mount{Destination: ocispec.Root.Path, Mounted: true}
if !filepath.IsAbs(rootfs.Destination) {
rootfs.Destination = filepath.Join(bundlePath, ocispec.Root.Path)
}
ociLog.Debugf("container rootfs: %s", rootfs.Target)
ociLog.Debugf("container rootfs: %s", rootfs.Destination)
cmd := types.Cmd{
Args: ocispec.Process.Args,
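
A small worked example of the path handling in ContainerConfig() above, with an illustrative bundle path: a relative root.path from the OCI config is joined onto the bundle directory, while an absolute one is used as-is.

package main

import (
    "fmt"
    "path/filepath"

    vc "github.com/kata-containers/runtime/virtcontainers"
)

func main() {
    bundlePath := "/run/vc/bundles/abc" // illustrative
    rootPath := "rootfs"                // ocispec.Root.Path from config.json

    rootfs := vc.Mount{Destination: rootPath, Mounted: true}
    if !filepath.IsAbs(rootfs.Destination) {
        rootfs.Destination = filepath.Join(bundlePath, rootPath)
    }
    fmt.Println(rootfs.Destination) // /run/vc/bundles/abc/rootfs
}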


@ -150,6 +150,7 @@ func TestMinimalSandboxConfig(t *testing.T) {
Type: "proc",
Options: nil,
HostPath: "",
Mounted: true,
},
{
Source: "tmpfs",
@ -157,6 +158,7 @@ func TestMinimalSandboxConfig(t *testing.T) {
Type: "tmpfs",
Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"},
HostPath: "",
Mounted: true,
},
{
Source: "devpts",
@ -164,6 +166,7 @@ func TestMinimalSandboxConfig(t *testing.T) {
Type: "devpts",
Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"},
HostPath: "",
Mounted: true,
},
}
@ -200,7 +203,7 @@ func TestMinimalSandboxConfig(t *testing.T) {
expectedContainerConfig := vc.ContainerConfig{
ID: containerID,
RootFs: vc.RootFs{Target: path.Join(tempBundlePath, "rootfs"), Mounted: true},
RootFs: vc.Mount{Destination: path.Join(tempBundlePath, "rootfs"), Mounted: true},
ReadonlyRootfs: true,
Cmd: expectedCmd,
Annotations: map[string]string{


@ -291,7 +291,7 @@ func (s *Sandbox) Status() SandboxStatus {
for _, c := range s.containers {
rootfs := c.config.RootFs.Source
if c.config.RootFs.Mounted {
rootfs = c.config.RootFs.Target
rootfs = c.config.RootFs.Destination
}
contStatusList = append(contStatusList, ContainerStatus{
@ -1241,7 +1241,7 @@ func (s *Sandbox) StatusContainer(containerID string) (ContainerStatus, error) {
for id, c := range s.containers {
rootfs := c.config.RootFs.Source
if c.config.RootFs.Mounted {
rootfs = c.config.RootFs.Target
rootfs = c.config.RootFs.Destination
}
if id == containerID {
return ContainerStatus{


@ -7,7 +7,6 @@ package virtcontainers
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
@ -750,99 +749,6 @@ func TestSandboxGetContainer(t *testing.T) {
}
}
func TestContainerStateSetFstype(t *testing.T) {
var err error
containers := []ContainerConfig{
{
ID: "100",
Annotations: containerAnnotations,
},
}
hConfig := newHypervisorConfig(nil, nil)
sandbox, err := testCreateSandbox(t, testSandboxID, MockHypervisor, hConfig, NoopAgentType, NetworkConfig{}, containers, nil)
assert.Nil(t, err)
defer cleanUp()
vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
assert.Nil(t, err)
sandbox.store = vcStore
c := sandbox.GetContainer("100")
if c == nil {
t.Fatal()
}
cImpl, ok := c.(*Container)
assert.True(t, ok)
containerStore, err := store.NewVCContainerStore(sandbox.ctx, sandbox.id, c.ID())
if err != nil {
t.Fatal(err)
}
cImpl.store = containerStore
path := store.ContainerRuntimeRootPath(testSandboxID, c.ID())
stateFilePath := filepath.Join(path, store.StateFile)
f, err := os.Create(stateFilePath)
if err != nil {
t.Fatal(err)
}
state := types.ContainerState{
State: "ready",
Fstype: "vfs",
}
cImpl.state = state
stateData := `{
"state":"ready",
"fstype":"vfs",
}`
n, err := f.WriteString(stateData)
if err != nil || n != len(stateData) {
f.Close()
t.Fatal()
}
f.Close()
newFstype := "ext4"
if err := cImpl.setStateFstype(newFstype); err != nil {
t.Fatal(err)
}
if cImpl.state.Fstype != newFstype {
t.Fatal()
}
fileData, err := ioutil.ReadFile(stateFilePath)
if err != nil {
t.Fatal()
}
// experimental features doesn't write state.json
if sandbox.supportNewStore() {
return
}
var res types.ContainerState
err = json.Unmarshal([]byte(string(fileData)), &res)
if err != nil {
t.Fatal(err)
}
if res.Fstype != newFstype {
t.Fatal()
}
if res.State != state.State {
t.Fatal()
}
}
const vfioPath = "/dev/vfio/"
func TestSandboxAttachDevicesVFIO(t *testing.T) {


@ -9,11 +9,6 @@ package types
type ContainerState struct {
State StateString `json:"state"`
BlockDeviceID string
// File system of the rootfs incase it is block device
Fstype string `json:"fstype"`
// CgroupPath is the cgroup hierarchy where sandbox's processes
// including the hypervisor are placed.
CgroupPath string `json:"cgroupPath,omitempty"`