device: use devicemanager to manage rootfs block

Fixes #635

When the container rootfs is block based (the devicemapper use case), we can
re-use the sandbox device manager to manage rootfs block device plug/unplug.
We no longer keep a detailed description of the block device in the container
state file; instead we only need a block device ID referencing the sandbox
device.

Remove `HotpluggedDrive` and `RootfsPCIAddr` from the state file because they
are no longer necessary.

Signed-off-by: Wei Zhang <zhangwei555@huawei.com>
Wei Zhang 2018-08-31 17:49:27 +08:00
parent e599ef430a
commit c518b1ef00
5 changed files with 103 additions and 95 deletions
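As a reading aid, here is a minimal sketch of the attach/detach flow this commit moves to, using only the device-manager calls that appear in the diff below (NewDevice, AttachDevice, DetachDevice, RemoveDevice, DeviceID). The wrapper function is hypothetical, and the Major/Minor fields that the real hotplug path fills in are omitted:

// Hypothetical helper, written as if inside the virtcontainers package.
func rootfsBlockLifecycle(sandbox *Sandbox, devicePath, containerID string) error {
	// Register the host block device with the sandbox device manager.
	b, err := sandbox.devManager.NewDevice(config.DeviceInfo{
		HostPath:      devicePath,
		ContainerPath: filepath.Join(kataGuestSharedDir, containerID),
		DevType:       "b", // block device
	})
	if err != nil {
		return err
	}

	// Hotplug the device; container state records only its ID.
	if err := sandbox.devManager.AttachDevice(b.DeviceID(), sandbox); err != nil {
		return err
	}

	// Teardown mirrors setup: detach from the VM, then drop the device.
	if err := sandbox.devManager.DetachDevice(b.DeviceID(), sandbox); err != nil {
		return err
	}
	return sandbox.devManager.RemoveDevice(b.DeviceID())
}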

View File

@@ -342,28 +342,6 @@ func (c *Container) setStateFstype(fstype string) error {
 	return nil
 }
 
-func (c *Container) setStateHotpluggedDrive(hotplugged bool) error {
-	c.state.HotpluggedDrive = hotplugged
-
-	err := c.sandbox.storage.storeContainerResource(c.sandbox.id, c.id, stateFileType, c.state)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (c *Container) setContainerRootfsPCIAddr(addr string) error {
-	c.state.RootfsPCIAddr = addr
-
-	err := c.sandbox.storage.storeContainerResource(c.sandbox.id, c.id, stateFileType, c.state)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
 // GetAnnotations returns container's annotations
 func (c *Container) GetAnnotations() map[string]string {
 	return c.config.Annotations
@@ -1080,38 +1058,43 @@ func (c *Container) hotplugDrive() error {
 		return err
 	}
 
 	devicePath, err = filepath.EvalSymlinks(devicePath)
 	if err != nil {
 		return err
 	}
 
 	c.Logger().WithFields(logrus.Fields{
 		"device-path": devicePath,
 		"fs-type":     fsType,
 	}).Info("Block device detected")
 
-	driveIndex, err := c.sandbox.getAndSetSandboxBlockIndex()
-	if err != nil {
-		return err
+	var stat unix.Stat_t
+	if err := unix.Stat(devicePath, &stat); err != nil {
+		return fmt.Errorf("stat %q failed: %v", devicePath, err)
 	}
 
-	// TODO: use general device manager instead of BlockDrive directly
-	// Add drive with id as container id
-	devID := utils.MakeNameID("drive", c.id, maxDevIDSize)
-	drive := config.BlockDrive{
-		File:   devicePath,
-		Format: "raw",
-		ID:     devID,
-		Index:  driveIndex,
-	}
+	if c.checkBlockDeviceSupport() && stat.Mode&unix.S_IFBLK == unix.S_IFBLK {
+		b, err := c.sandbox.devManager.NewDevice(config.DeviceInfo{
+			HostPath:      devicePath,
+			ContainerPath: filepath.Join(kataGuestSharedDir, c.id),
+			DevType:       "b",
+			Major:         int64(unix.Major(stat.Rdev)),
+			Minor:         int64(unix.Minor(stat.Rdev)),
+		})
+		if err != nil {
+			return fmt.Errorf("device manager failed to create rootfs device for %q: %v", devicePath, err)
+		}
 
-	if _, err := c.sandbox.hypervisor.hotplugAddDevice(&drive, blockDev); err != nil {
-		return err
-	}
+		c.state.BlockDeviceID = b.DeviceID()
 
-	if drive.PCIAddr != "" {
-		c.setContainerRootfsPCIAddr(drive.PCIAddr)
-	}
+		// attach rootfs device
+		if err := c.sandbox.devManager.AttachDevice(b.DeviceID(), c.sandbox); err != nil {
+			return err
+		}
 
-	c.setStateHotpluggedDrive(true)
-
-	if err := c.setStateBlockIndex(driveIndex); err != nil {
-		return err
+		if err := c.sandbox.storeSandboxDevices(); err != nil {
+			return err
+		}
 	}
 
 	return c.setStateFstype(fsType)
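The hunk above keys the new path off a stat(2) of the resolved device path. A standalone sketch of that block-device detection idiom, using golang.org/x/sys/unix as the new code does (the device path here is hypothetical):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	devicePath := "/dev/sda1" // hypothetical path, for illustration only

	var stat unix.Stat_t
	if err := unix.Stat(devicePath, &stat); err != nil {
		fmt.Printf("stat %q failed: %v\n", devicePath, err)
		return
	}

	// The same mode test as the hunk above: S_IFBLK set in the
	// file-type bits marks a block special file.
	if stat.Mode&unix.S_IFBLK == unix.S_IFBLK {
		// Major/minor numbers live in st_rdev for device nodes,
		// which is what hotplugDrive copies into config.DeviceInfo.
		fmt.Printf("block device, major=%d minor=%d\n",
			unix.Major(stat.Rdev), unix.Minor(stat.Rdev))
	} else {
		fmt.Println("not a block device")
	}
}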
@@ -1126,19 +1109,28 @@ func (c *Container) isDriveUsed() bool {
 }
 
 func (c *Container) removeDrive() (err error) {
-	if c.isDriveUsed() && c.state.HotpluggedDrive {
+	if c.isDriveUsed() {
 		c.Logger().Info("unplugging block device")
 
-		devID := utils.MakeNameID("drive", c.id, maxDevIDSize)
-		drive := &config.BlockDrive{
-			ID: devID,
+		devID := c.state.BlockDeviceID
+		err := c.sandbox.devManager.DetachDevice(devID, c.sandbox)
+		if err != nil && err != manager.ErrDeviceNotAttached {
+			return err
 		}
 
-		l := c.Logger().WithField("device-id", devID)
-		l.Info("Unplugging block device")
-
-		if _, err := c.sandbox.hypervisor.hotplugRemoveDevice(drive, blockDev); err != nil {
-			l.WithError(err).Info("Failed to unplug block device")
-			return err
+		if err = c.sandbox.devManager.RemoveDevice(devID); err != nil {
+			c.Logger().WithFields(logrus.Fields{
+				"container": c.id,
+				"device-id": devID,
+			}).WithError(err).Error("remove device failed")
+
+			// ignore the device not exist error
+			if err != manager.ErrDeviceNotExist {
+				return err
+			}
+		}
+
+		if err := c.sandbox.storeSandboxDevices(); err != nil {
+			return err
 		}
 	}
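The detach path above deliberately tolerates sentinel errors so that removeDrive stays idempotent. A condensed sketch of that pattern, assuming only the manager package errors shown in the hunk (the helper itself is hypothetical):

// Hypothetical helper, as if inside the virtcontainers package.
func detachAndRemove(sandbox *Sandbox, devID string) error {
	// A device that was never attached is not an error on teardown.
	err := sandbox.devManager.DetachDevice(devID, sandbox)
	if err != nil && err != manager.ErrDeviceNotAttached {
		return err
	}

	// Likewise, a device already gone from the manager is ignored.
	if err := sandbox.devManager.RemoveDevice(devID); err != nil &&
		err != manager.ErrDeviceNotExist {
		return err
	}

	// Persist the device list so the state survives a runtime restart.
	return sandbox.storeSandboxDevices()
}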

View File

@@ -6,6 +6,7 @@
 package virtcontainers
 
 import (
+	"context"
 	"io/ioutil"
 	"os"
 	"os/exec"
@@ -15,6 +16,10 @@ import (
 	"syscall"
 	"testing"
 
+	"github.com/kata-containers/runtime/virtcontainers/device/api"
+	"github.com/kata-containers/runtime/virtcontainers/device/config"
+	"github.com/kata-containers/runtime/virtcontainers/device/drivers"
+	"github.com/kata-containers/runtime/virtcontainers/device/manager"
 	vcAnnotations "github.com/kata-containers/runtime/virtcontainers/pkg/annotations"
 	"github.com/stretchr/testify/assert"
 )
@@ -83,7 +88,11 @@ func TestContainerSandbox(t *testing.T) {
 }
 
 func TestContainerRemoveDrive(t *testing.T) {
-	sandbox := &Sandbox{}
+	sandbox := &Sandbox{
+		id:         "sandbox",
+		devManager: manager.NewDeviceManager(manager.VirtioSCSI, nil),
+		storage:    &filesystem{},
+	}
 
 	container := Container{
 		sandbox: sandbox,
@@ -95,26 +104,35 @@ func TestContainerRemoveDrive(t *testing.T) {
 	// hotplugRemoveDevice for hypervisor should not be called.
 	// test should pass without a hypervisor created for the container's sandbox.
-	if err != nil {
-		t.Fatal("")
-	}
+	assert.Nil(t, err, "remove drive should succeed")
+
+	sandbox.hypervisor = &mockHypervisor{}
+	path := "/dev/hda"
+	deviceInfo := config.DeviceInfo{
+		HostPath:      path,
+		ContainerPath: path,
+		DevType:       "b",
+	}
+	devReceiver := &api.MockDeviceReceiver{}
+
+	device, err := sandbox.devManager.NewDevice(deviceInfo)
+	assert.Nil(t, err)
+	_, ok := device.(*drivers.BlockDevice)
+	assert.True(t, ok)
+
+	err = device.Attach(devReceiver)
+	assert.Nil(t, err)
+
+	err = sandbox.storage.createAllResources(context.Background(), sandbox)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = sandbox.storeSandboxDevices()
+	assert.Nil(t, err)
 
 	container.state.Fstype = "xfs"
-	container.state.HotpluggedDrive = false
+	container.state.BlockDeviceID = device.DeviceID()
 	err = container.removeDrive()
 
-	// hotplugRemoveDevice for hypervisor should not be called.
-	if err != nil {
-		t.Fatal("")
-	}
-
-	container.state.HotpluggedDrive = true
-	sandbox.hypervisor = &mockHypervisor{}
-	err = container.removeDrive()
-
-	if err != nil {
-		t.Fatal()
-	}
+	assert.Nil(t, err, "remove drive should succeed")
 }
 
 func testSetupFakeRootfs(t *testing.T) (testRawFile, loopDev, mntDir string, err error) {
@@ -197,6 +215,7 @@ func TestContainerAddDriveDir(t *testing.T) {
 	fs := &filesystem{}
 	sandbox := &Sandbox{
 		id:         testSandboxID,
+		devManager: manager.NewDeviceManager(manager.VirtioSCSI, nil),
 		storage:    fs,
 		hypervisor: &mockHypervisor{},
 		agent:      &noopAgent{},
@@ -243,14 +262,13 @@
 	}()
 
 	container.state.Fstype = ""
-	container.state.HotpluggedDrive = false
 
 	err = container.hotplugDrive()
 	if err != nil {
 		t.Fatalf("Error with hotplugDrive :%v", err)
 	}
 
-	if container.state.Fstype == "" || !container.state.HotpluggedDrive {
+	if container.state.Fstype == "" {
 		t.Fatal()
 	}
 }

View File

@@ -766,7 +766,7 @@ func (k *kataAgent) rollbackFailingContainerCreation(c *Container) {
 }
 
 func (k *kataAgent) buildContainerRootfs(sandbox *Sandbox, c *Container, rootPathParent string) (*grpc.Storage, error) {
-	if c.state.Fstype != "" {
+	if c.state.Fstype != "" && c.state.BlockDeviceID != "" {
 		// The rootfs storage volume represents the container rootfs
 		// mount point inside the guest.
 		// It can be a block based device (when using block based container
@@ -775,23 +775,25 @@ func (k *kataAgent) buildContainerRootfs(sandbox *Sandbox, c *Container, rootPathParent string) (*grpc.Storage, error) {
 		rootfs := &grpc.Storage{}
 
 		// This is a block based device rootfs.
-		// Pass a drive name only in case of virtio-blk driver.
-		// If virtio-scsi driver, the agent will be able to find the
-		// device based on the provided address.
-		if sandbox.config.HypervisorConfig.BlockDeviceDriver == VirtioBlock {
-			rootfs.Driver = kataBlkDevType
-			rootfs.Source = c.state.RootfsPCIAddr
-		} else {
-			scsiAddr, err := utils.GetSCSIAddress(c.state.BlockIndex)
-			if err != nil {
-				return nil, err
-			}
 
-			rootfs.Driver = kataSCSIDevType
-			rootfs.Source = scsiAddr
+		device := sandbox.devManager.GetDeviceByID(c.state.BlockDeviceID)
+		if device == nil {
+			k.Logger().WithField("device", c.state.BlockDeviceID).Error("failed to find device by id")
+			return nil, fmt.Errorf("failed to find device by id %q", c.state.BlockDeviceID)
+		}
+
+		blockDrive, ok := device.GetDeviceInfo().(*config.BlockDrive)
+		if !ok || blockDrive == nil {
+			k.Logger().Error("malformed block drive")
+			return nil, fmt.Errorf("malformed block drive")
+		}
+
+		if sandbox.config.HypervisorConfig.BlockDeviceDriver == VirtioBlock {
+			rootfs.Driver = kataBlkDevType
+			rootfs.Source = blockDrive.VirtPath
+		} else {
+			rootfs.Driver = kataSCSIDevType
+			rootfs.Source = blockDrive.SCSIAddr
 		}
 
 		rootfs.MountPoint = rootPathParent
 		rootfs.Fstype = c.state.Fstype
@@ -801,6 +803,7 @@ func (k *kataAgent) buildContainerRootfs(sandbox *Sandbox, c *Container, rootPathParent string) (*grpc.Storage, error) {
 		return rootfs, nil
 	}
 
 	// This is not a block based device rootfs.
 	// We are going to bind mount it into the 9pfs
 	// shared drive between the host and the guest.
View File

@@ -54,18 +54,13 @@ const (
 type State struct {
 	State stateString `json:"state"`
 
+	BlockDeviceID string
 	// Index of the block device passed to hypervisor.
 	BlockIndex int `json:"blockIndex"`
 
 	// File system of the rootfs incase it is block device
 	Fstype string `json:"fstype"`
 
-	// Bool to indicate if the drive for a container was hotplugged.
-	HotpluggedDrive bool `json:"hotpluggedDrive"`
-
-	// PCI slot at which the block device backing the container rootfs is attached.
-	RootfsPCIAddr string `json:"rootfsPCIAddr"`
-
 	// Pid is the process id of the sandbox container which is the first
 	// container to be started.
 	Pid int `json:"pid"`

View File

@@ -200,12 +200,12 @@ func (v *VM) ReseedRNG() error {
 	data := make([]byte, 512)
 	f, err := os.OpenFile(urandomDev, os.O_RDONLY, 0)
 	if err != nil {
-		v.logger().WithError(err).Warn("fail to open %s", urandomDev)
+		v.logger().WithError(err).Warnf("fail to open %s", urandomDev)
 		return err
 	}
 	defer f.Close()
 
 	if _, err = f.Read(data); err != nil {
-		v.logger().WithError(err).Warn("fail to read %s", urandomDev)
+		v.logger().WithError(err).Warnf("fail to read %s", urandomDev)
 		return err
 	}