block: Add new block storage driver "nvdimm"

Setting block_device_driver to "nvdimm" makes the hypervisor use
the block device as an NVDIMM disk.

Fixes: #1032

Signed-off-by: Hui Zhu <teawater@hyper.sh>
This commit is contained in:
Hui Zhu 2018-12-14 12:39:04 +08:00
parent b954eecad1
commit ef75c3d19e
7 changed files with 107 additions and 49 deletions

View File

@ -91,8 +91,8 @@ default_memory = @DEFMEMSZ@
disable_block_device_use = @DEFDISABLEBLOCK@ disable_block_device_use = @DEFDISABLEBLOCK@
# Block storage driver to be used for the hypervisor in case the container # Block storage driver to be used for the hypervisor in case the container
# rootfs is backed by a block device. This is either virtio-scsi or # rootfs is backed by a block device. This is virtio-scsi, virtio-blk
# virtio-blk. # or nvdimm.
block_device_driver = "@DEFBLOCKSTORAGEDRIVER@" block_device_driver = "@DEFBLOCKSTORAGEDRIVER@"
# Specifies cache-related options will be set to block devices or not. # Specifies cache-related options will be set to block devices or not.

View File

@ -294,7 +294,7 @@ func (h hypervisor) defaultBridges() uint32 {
} }
func (h hypervisor) blockDeviceDriver() (string, error) { func (h hypervisor) blockDeviceDriver() (string, error) {
supportedBlockDrivers := []string{config.VirtioSCSI, config.VirtioBlock, config.VirtioMmio} supportedBlockDrivers := []string{config.VirtioSCSI, config.VirtioBlock, config.VirtioMmio, config.Nvdimm}
if h.BlockDeviceDriver == "" { if h.BlockDeviceDriver == "" {
return defaultBlockDeviceDriver, nil return defaultBlockDeviceDriver, nil

View File

@ -47,6 +47,9 @@ const (
// VirtioSCSI means use virtio-scsi for hotplugging drives // VirtioSCSI means use virtio-scsi for hotplugging drives
VirtioSCSI = "virtio-scsi" VirtioSCSI = "virtio-scsi"
// Nvdimm means use nvdimm for hotplugging drives
Nvdimm = "nvdimm"
) )
// Defining these as a variable instead of a const, to allow // Defining these as a variable instead of a const, to allow
@ -119,6 +122,9 @@ type BlockDrive struct {
// SCSI address is in the format SCSI-Id:LUN // SCSI address is in the format SCSI-Id:LUN
SCSIAddr string SCSIAddr string
// NvdimmID is the nvdimm id inside the VM
NvdimmID string
// VirtPath at which the device appears inside the VM, outside of the container mount namespace // VirtPath at which the device appears inside the VM, outside of the container mount namespace
VirtPath string VirtPath string
} }

View File

@ -78,7 +78,7 @@ func (device *BlockDevice) Attach(devReceiver api.DeviceReceiver) (err error) {
} }
drive.SCSIAddr = scsiAddr drive.SCSIAddr = scsiAddr
} else { } else if customOptions["block-driver"] != "nvdimm" {
var globalIdx int var globalIdx int
switch customOptions["block-driver"] { switch customOptions["block-driver"] {
@ -102,7 +102,7 @@ func (device *BlockDevice) Attach(devReceiver api.DeviceReceiver) (err error) {
drive.VirtPath = filepath.Join("/dev", driveName) drive.VirtPath = filepath.Join("/dev", driveName)
} }
deviceLogger().WithField("device", device.DeviceInfo.HostPath).Info("Attaching block device") deviceLogger().WithField("device", device.DeviceInfo.HostPath).WithField("VirtPath", drive.VirtPath).Infof("Attaching %s device", customOptions["block-driver"])
device.BlockDrive = drive device.BlockDrive = drive
if err = devReceiver.HotplugAddDevice(device, config.DeviceBlock); err != nil { if err = devReceiver.HotplugAddDevice(device, config.DeviceBlock); err != nil {
return err return err

View File

@ -26,6 +26,8 @@ const (
VirtioBlock string = "virtio-blk" VirtioBlock string = "virtio-blk"
// VirtioSCSI indicates block driver is virtio-scsi based // VirtioSCSI indicates block driver is virtio-scsi based
VirtioSCSI string = "virtio-scsi" VirtioSCSI string = "virtio-scsi"
// Nvdimm indicates block driver is nvdimm based
Nvdimm string = "nvdimm"
) )
var ( var (
@ -61,6 +63,8 @@ func NewDeviceManager(blockDriver string, devices []api.Device) api.DeviceManage
dm.blockDriver = VirtioMmio dm.blockDriver = VirtioMmio
} else if blockDriver == VirtioBlock { } else if blockDriver == VirtioBlock {
dm.blockDriver = VirtioBlock dm.blockDriver = VirtioBlock
} else if blockDriver == Nvdimm {
dm.blockDriver = Nvdimm
} else { } else {
dm.blockDriver = VirtioSCSI dm.blockDriver = VirtioSCSI
} }

View File

@ -62,6 +62,7 @@ var (
kataMmioBlkDevType = "mmioblk" kataMmioBlkDevType = "mmioblk"
kataBlkDevType = "blk" kataBlkDevType = "blk"
kataSCSIDevType = "scsi" kataSCSIDevType = "scsi"
kataNvdimmDevType = "nvdimm"
sharedDir9pOptions = []string{"trans=virtio,version=9p2000.L,cache=mmap", "nodev"} sharedDir9pOptions = []string{"trans=virtio,version=9p2000.L,cache=mmap", "nodev"}
shmDir = "shm" shmDir = "shm"
kataEphemeralDevType = "ephemeral" kataEphemeralDevType = "ephemeral"
@ -883,6 +884,9 @@ func (k *kataAgent) appendDevices(deviceList []*grpc.Device, c *Container) []*gr
case config.VirtioSCSI: case config.VirtioSCSI:
kataDevice.Type = kataSCSIDevType kataDevice.Type = kataSCSIDevType
kataDevice.Id = d.SCSIAddr kataDevice.Id = d.SCSIAddr
case config.Nvdimm:
kataDevice.Type = kataNvdimmDevType
kataDevice.VmPath = fmt.Sprintf("/dev/pmem%s", d.NvdimmID)
} }
deviceList = append(deviceList, kataDevice) deviceList = append(deviceList, kataDevice)

View File

@ -8,20 +8,22 @@ package virtcontainers
import ( import (
"context" "context"
"fmt" "fmt"
govmmQemu "github.com/intel/govmm/qemu"
"github.com/kata-containers/runtime/virtcontainers/pkg/uuid"
"github.com/opentracing/opentracing-go"
"github.com/sirupsen/logrus"
"math" "math"
"os" "os"
"path/filepath" "path/filepath"
"strconv" "strconv"
"strings" "strings"
"syscall"
"time" "time"
"unsafe"
govmmQemu "github.com/intel/govmm/qemu"
"github.com/kata-containers/runtime/virtcontainers/pkg/uuid"
opentracing "github.com/opentracing/opentracing-go"
"github.com/sirupsen/logrus"
"github.com/kata-containers/runtime/virtcontainers/device/config" "github.com/kata-containers/runtime/virtcontainers/device/config"
"github.com/kata-containers/runtime/virtcontainers/utils" "github.com/kata-containers/runtime/virtcontainers/utils"
"golang.org/x/sys/unix"
) )
// romFile is the file name of the ROM that can be used for virtio-pci devices. // romFile is the file name of the ROM that can be used for virtio-pci devices.
@ -73,6 +75,8 @@ type qemu struct {
fds []*os.File fds []*os.File
ctx context.Context ctx context.Context
nvdimmCount int
} }
const ( const (
@ -221,6 +225,20 @@ func (q *qemu) init(ctx context.Context, id string, hypervisorConfig *Hypervisor
q.config = *hypervisorConfig q.config = *hypervisorConfig
q.arch = newQemuArch(q.config) q.arch = newQemuArch(q.config)
initrdPath, err := q.config.InitrdAssetPath()
if err != nil {
return err
}
imagePath, err := q.config.ImageAssetPath()
if err != nil {
return err
}
if initrdPath == "" && imagePath != "" {
q.nvdimmCount = 1
} else {
q.nvdimmCount = 0
}
if err = q.storage.fetchHypervisorState(q.id, &q.state); err != nil { if err = q.storage.fetchHypervisorState(q.id, &q.state); err != nil {
q.Logger().Debug("Creating bridges") q.Logger().Debug("Creating bridges")
q.state.Bridges = q.arch.bridges(q.config.DefaultBridges) q.state.Bridges = q.arch.bridges(q.config.DefaultBridges)
@ -727,15 +745,27 @@ func (q *qemu) removeDeviceFromBridge(ID string) error {
return err return err
} }
func (q *qemu) hotplugBlockDevice(drive *config.BlockDrive, op operation) error { func (q *qemu) hotplugAddBlockDevice(drive *config.BlockDrive, op operation, devID string) error {
err := q.qmpSetup() var err error
if q.config.BlockDeviceDriver == config.Nvdimm {
var blocksize int64
file, err := os.Open(drive.File)
if err != nil { if err != nil {
return err return err
} }
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, file.Fd(), unix.BLKGETSIZE64, uintptr(unsafe.Pointer(&blocksize))); err != 0 {
return err
}
if err = q.qmpMonitorCh.qmp.ExecuteNVDIMMDeviceAdd(q.qmpMonitorCh.ctx, drive.ID, drive.File, blocksize); err != nil {
q.Logger().WithError(err).Errorf("Failed to add NVDIMM device %s", drive.File)
return err
}
drive.NvdimmID = strconv.Itoa(q.nvdimmCount)
q.nvdimmCount++
return nil
}
devID := "virtio-" + drive.ID
if op == addDevice {
if q.config.BlockDeviceCacheSet { if q.config.BlockDeviceCacheSet {
err = q.qmpMonitorCh.qmp.ExecuteBlockdevAddWithCache(q.qmpMonitorCh.ctx, drive.File, drive.ID, q.config.BlockDeviceCacheDirect, q.config.BlockDeviceCacheNoflush) err = q.qmpMonitorCh.qmp.ExecuteBlockdevAddWithCache(q.qmpMonitorCh.ctx, drive.File, drive.ID, q.config.BlockDeviceCacheDirect, q.config.BlockDeviceCacheNoflush)
} else { } else {
@ -774,6 +804,20 @@ func (q *qemu) hotplugBlockDevice(drive *config.BlockDrive, op operation) error
return err return err
} }
} }
return nil
}
func (q *qemu) hotplugBlockDevice(drive *config.BlockDrive, op operation) error {
err := q.qmpSetup()
if err != nil {
return err
}
devID := "virtio-" + drive.ID
if op == addDevice {
err = q.hotplugAddBlockDevice(drive, op, devID)
} else { } else {
if q.config.BlockDeviceDriver == config.VirtioBlock { if q.config.BlockDeviceDriver == config.VirtioBlock {
if err := q.removeDeviceFromBridge(drive.ID); err != nil { if err := q.removeDeviceFromBridge(drive.ID); err != nil {
@ -790,7 +834,7 @@ func (q *qemu) hotplugBlockDevice(drive *config.BlockDrive, op operation) error
} }
} }
return nil return err
} }
func (q *qemu) hotplugVFIODevice(device *config.VFIODev, op operation) error { func (q *qemu) hotplugVFIODevice(device *config.VFIODev, op operation) error {