agent: append and handle vhost-user-blk device

Kata-runtime can now append a vhost-user-blk device to the
device list of a container, and it handles volumes that are
block devices of the VhostUserBlk type.

Inside the VM, the kata-agent identifies a vhost-user-blk
device by its PCI address.
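
As a rough sketch (illustrative only, simplified from the code in
this change; "attrs" stands for the device's VhostUserDeviceAttrs
and "m" for the container mount), the storage object handed to the
agent for such a volume looks like:

    // Illustrative sketch: a vhost-user-blk backed volume is described
    // to the agent as a block storage whose source is the guest PCI
    // address (see handleVhostUserBlkVolume below).
    vol := &grpc.Storage{
        Driver:     kataBlkDevType,  // treated like a virtio-blk device in the guest
        Source:     attrs.PCIAddr,   // guest PCI address, e.g. "0001:01"
        MountPoint: m.Destination,   // bind-mounted into the container at this path
    }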

Fixes: #2380

Signed-off-by: Liu Xiaodong <xiaodong.liu@intel.com>
Author: Liu Xiaodong
Date:   2018-02-22 01:25:14 -05:00
parent  cf066b75ac
commit  126fa157a3
2 changed files with 250 additions and 51 deletions

@@ -22,6 +22,7 @@ import (
 	aTypes "github.com/kata-containers/agent/pkg/types"
 	kataclient "github.com/kata-containers/agent/protocols/client"
 	"github.com/kata-containers/agent/protocols/grpc"
+	"github.com/kata-containers/runtime/virtcontainers/device/api"
 	"github.com/kata-containers/runtime/virtcontainers/device/config"
 	persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
 	vcAnnotations "github.com/kata-containers/runtime/virtcontainers/pkg/annotations"
@@ -1089,7 +1090,62 @@ func (k *kataAgent) handleShm(grpcSpec *grpc.Spec, sandbox *Sandbox) {
 	}
 }
 
+func (k *kataAgent) appendBlockDevice(dev ContainerDevice, c *Container) *grpc.Device {
+	device := c.sandbox.devManager.GetDeviceByID(dev.ID)
+
+	d, ok := device.GetDeviceInfo().(*config.BlockDrive)
+	if !ok || d == nil {
+		k.Logger().WithField("device", device).Error("malformed block drive")
+		return nil
+	}
+
+	kataDevice := &grpc.Device{
+		ContainerPath: dev.ContainerPath,
+	}
+
+	switch c.sandbox.config.HypervisorConfig.BlockDeviceDriver {
+	case config.VirtioMmio:
+		kataDevice.Type = kataMmioBlkDevType
+		kataDevice.Id = d.VirtPath
+		kataDevice.VmPath = d.VirtPath
+	case config.VirtioBlockCCW:
+		kataDevice.Type = kataBlkCCWDevType
+		kataDevice.Id = d.DevNo
+	case config.VirtioBlock:
+		kataDevice.Type = kataBlkDevType
+		kataDevice.Id = d.PCIAddr
+	case config.VirtioSCSI:
+		kataDevice.Type = kataSCSIDevType
+		kataDevice.Id = d.SCSIAddr
+	case config.Nvdimm:
+		kataDevice.Type = kataNvdimmDevType
+		kataDevice.VmPath = fmt.Sprintf("/dev/pmem%s", d.NvdimmID)
+	}
+
+	return kataDevice
+}
+
+func (k *kataAgent) appendVhostUserBlkDevice(dev ContainerDevice, c *Container) *grpc.Device {
+	device := c.sandbox.devManager.GetDeviceByID(dev.ID)
+
+	d, ok := device.GetDeviceInfo().(*config.VhostUserDeviceAttrs)
+	if !ok || d == nil {
+		k.Logger().WithField("device", device).Error("malformed vhost-user-blk drive")
+		return nil
+	}
+
+	kataDevice := &grpc.Device{
+		ContainerPath: dev.ContainerPath,
+		Type:          kataBlkDevType,
+		Id:            d.PCIAddr,
+	}
+
+	return kataDevice
+}
+
 func (k *kataAgent) appendDevices(deviceList []*grpc.Device, c *Container) []*grpc.Device {
+	var kataDevice *grpc.Device
+
 	for _, dev := range c.devices {
 		device := c.sandbox.devManager.GetDeviceByID(dev.ID)
 		if device == nil {
@@ -1097,39 +1153,17 @@ func (k *kataAgent) appendDevices(deviceList []*grpc.Device, c *Container) []*grpc.Device {
 			return nil
 		}
 
-		if device.DeviceType() != config.DeviceBlock {
+		switch device.DeviceType() {
+		case config.DeviceBlock:
+			kataDevice = k.appendBlockDevice(dev, c)
+		case config.VhostUserBlk:
+			kataDevice = k.appendVhostUserBlkDevice(dev, c)
+		}
+
+		if kataDevice == nil {
 			continue
 		}
 
-		d, ok := device.GetDeviceInfo().(*config.BlockDrive)
-		if !ok || d == nil {
-			k.Logger().WithField("device", device).Error("malformed block drive")
-			continue
-		}
-
-		kataDevice := &grpc.Device{
-			ContainerPath: dev.ContainerPath,
-		}
-
-		switch c.sandbox.config.HypervisorConfig.BlockDeviceDriver {
-		case config.VirtioMmio:
-			kataDevice.Type = kataMmioBlkDevType
-			kataDevice.Id = d.VirtPath
-			kataDevice.VmPath = d.VirtPath
-		case config.VirtioBlockCCW:
-			kataDevice.Type = kataBlkCCWDevType
-			kataDevice.Id = d.DevNo
-		case config.VirtioBlock:
-			kataDevice.Type = kataBlkDevType
-			kataDevice.Id = d.PCIAddr
-		case config.VirtioSCSI:
-			kataDevice.Type = kataSCSIDevType
-			kataDevice.Id = d.SCSIAddr
-		case config.Nvdimm:
-			kataDevice.Type = kataNvdimmDevType
-			kataDevice.VmPath = fmt.Sprintf("/dev/pmem%s", d.NvdimmID)
-		}
-
 		deviceList = append(deviceList, kataDevice)
 	}
@@ -1416,6 +1450,53 @@ func (k *kataAgent) handleLocalStorage(mounts []specs.Mount, sandboxID string, r
 	return localStorages
 }
 
+// handleDeviceBlockVolume handles volume that is block device file
+// and DeviceBlock type.
+func (k *kataAgent) handleDeviceBlockVolume(c *Container, device api.Device) (*grpc.Storage, error) {
+	vol := &grpc.Storage{}
+
+	blockDrive, ok := device.GetDeviceInfo().(*config.BlockDrive)
+	if !ok || blockDrive == nil {
+		k.Logger().Error("malformed block drive")
+		return nil, fmt.Errorf("malformed block drive")
+	}
+	switch {
+	case c.sandbox.config.HypervisorConfig.BlockDeviceDriver == config.VirtioBlockCCW:
+		vol.Driver = kataBlkCCWDevType
+		vol.Source = blockDrive.DevNo
+	case c.sandbox.config.HypervisorConfig.BlockDeviceDriver == config.VirtioBlock:
+		vol.Driver = kataBlkDevType
+		vol.Source = blockDrive.PCIAddr
+	case c.sandbox.config.HypervisorConfig.BlockDeviceDriver == config.VirtioMmio:
+		vol.Driver = kataMmioBlkDevType
+		vol.Source = blockDrive.VirtPath
+	case c.sandbox.config.HypervisorConfig.BlockDeviceDriver == config.VirtioSCSI:
+		vol.Driver = kataSCSIDevType
+		vol.Source = blockDrive.SCSIAddr
+	default:
+		return nil, fmt.Errorf("Unknown block device driver: %s", c.sandbox.config.HypervisorConfig.BlockDeviceDriver)
+	}
+
+	return vol, nil
+}
+
+// handleVhostUserBlkVolume handles volume that is block device file
+// and VhostUserBlk type.
+func (k *kataAgent) handleVhostUserBlkVolume(c *Container, device api.Device) (*grpc.Storage, error) {
+	vol := &grpc.Storage{}
+
+	d, ok := device.GetDeviceInfo().(*config.VhostUserDeviceAttrs)
+	if !ok || d == nil {
+		k.Logger().Error("malformed vhost-user blk drive")
+		return nil, fmt.Errorf("malformed vhost-user blk drive")
+	}
+
+	vol.Driver = kataBlkDevType
+	vol.Source = d.PCIAddr
+
+	return vol, nil
+}
+
 // handleBlockVolumes handles volumes that are block devices files
 // by passing the block devices as Storage to the agent.
 func (k *kataAgent) handleBlockVolumes(c *Container) ([]*grpc.Storage, error) {
@@ -1433,33 +1514,27 @@ func (k *kataAgent) handleBlockVolumes(c *Container) ([]*grpc.Storage, error) {
 		// device is detached with detachDevices() for a container.
 		c.devices = append(c.devices, ContainerDevice{ID: id, ContainerPath: m.Destination})
 
-		vol := &grpc.Storage{}
+		var vol *grpc.Storage
 
 		device := c.sandbox.devManager.GetDeviceByID(id)
 		if device == nil {
 			k.Logger().WithField("device", id).Error("failed to find device by id")
 			return nil, fmt.Errorf("Failed to find device by id (id=%s)", id)
 		}
-		blockDrive, ok := device.GetDeviceInfo().(*config.BlockDrive)
-		if !ok || blockDrive == nil {
-			k.Logger().Error("malformed block drive")
+
+		var err error
+		switch device.DeviceType() {
+		case config.DeviceBlock:
+			vol, err = k.handleDeviceBlockVolume(c, device)
+		case config.VhostUserBlk:
+			vol, err = k.handleVhostUserBlkVolume(c, device)
+		default:
+			k.Logger().Error("Unknown device type")
 			continue
 		}
-		switch {
-		case c.sandbox.config.HypervisorConfig.BlockDeviceDriver == config.VirtioBlockCCW:
-			vol.Driver = kataBlkCCWDevType
-			vol.Source = blockDrive.DevNo
-		case c.sandbox.config.HypervisorConfig.BlockDeviceDriver == config.VirtioBlock:
-			vol.Driver = kataBlkDevType
-			vol.Source = blockDrive.PCIAddr
-		case c.sandbox.config.HypervisorConfig.BlockDeviceDriver == config.VirtioMmio:
-			vol.Driver = kataMmioBlkDevType
-			vol.Source = blockDrive.VirtPath
-		case c.sandbox.config.HypervisorConfig.BlockDeviceDriver == config.VirtioSCSI:
-			vol.Driver = kataSCSIDevType
-			vol.Source = blockDrive.SCSIAddr
-		default:
-			return nil, fmt.Errorf("Unknown block device driver: %s", c.sandbox.config.HypervisorConfig.BlockDeviceDriver)
-		}
+
+		if vol == nil || err != nil {
+			return nil, err
+		}
 
 		vol.MountPoint = m.Destination

@@ -391,6 +391,82 @@ func TestHandleLocalStorage(t *testing.T) {
 	assert.Equal(t, localMountPoint, expected)
 }
 
+func TestHandleBlockVolume(t *testing.T) {
+	k := kataAgent{}
+
+	c := &Container{
+		id: "100",
+	}
+	containers := map[string]*Container{}
+	containers[c.id] = c
+
+	// Create a VhostUserBlk device and a DeviceBlock device
+	vDevID := "MockVhostUserBlk"
+	bDevID := "MockDeviceBlock"
+	vDestination := "/VhostUserBlk/destination"
+	bDestination := "/DeviceBlock/destination"
+	vPCIAddr := "0001:01"
+	bPCIAddr := "0002:01"
+
+	vDev := drivers.NewVhostUserBlkDevice(&config.DeviceInfo{ID: vDevID})
+	bDev := drivers.NewBlockDevice(&config.DeviceInfo{ID: bDevID})
+
+	vDev.VhostUserDeviceAttrs = &config.VhostUserDeviceAttrs{PCIAddr: vPCIAddr}
+	bDev.BlockDrive = &config.BlockDrive{PCIAddr: bPCIAddr}
+
+	var devices []api.Device
+	devices = append(devices, vDev, bDev)
+
+	// Create a VhostUserBlk mount and a DeviceBlock mount
+	var mounts []Mount
+	vMount := Mount{
+		BlockDeviceID: vDevID,
+		Destination:   vDestination,
+	}
+	bMount := Mount{
+		BlockDeviceID: bDevID,
+		Destination:   bDestination,
+	}
+	mounts = append(mounts, vMount, bMount)
+
+	tmpDir := "/vhost/user/dir"
+	dm := manager.NewDeviceManager(manager.VirtioBlock, true, tmpDir, devices)
+
+	sConfig := SandboxConfig{}
+	sConfig.HypervisorConfig.BlockDeviceDriver = manager.VirtioBlock
+	sandbox := Sandbox{
+		id:         "100",
+		containers: containers,
+		hypervisor: &mockHypervisor{},
+		devManager: dm,
+		ctx:        context.Background(),
+		config:     &sConfig,
+	}
+
+	containers[c.id].sandbox = &sandbox
+	containers[c.id].mounts = mounts
+
+	volumeStorages, err := k.handleBlockVolumes(c)
+	assert.Nil(t, err, "Error while handling block volumes")
+
+	vStorage := &pb.Storage{
+		MountPoint: vDestination,
+		Fstype:     "bind",
+		Options:    []string{"bind"},
+		Driver:     kataBlkDevType,
+		Source:     vPCIAddr,
+	}
+	bStorage := &pb.Storage{
+		MountPoint: bDestination,
+		Fstype:     "bind",
+		Options:    []string{"bind"},
+		Driver:     kataBlkDevType,
+		Source:     bPCIAddr,
+	}
+
+	assert.Equal(t, vStorage, volumeStorages[0], "Error while handle VhostUserBlk type block volume")
+	assert.Equal(t, bStorage, volumeStorages[1], "Error while handle BlockDevice type block volume")
+}
+
 func TestAppendDevicesEmptyContainerDeviceList(t *testing.T) {
 	k := kataAgent{}
@@ -400,7 +476,7 @@ func TestAppendDevicesEmptyContainerDeviceList(t *testing.T) {
 	c := &Container{
 		sandbox: &Sandbox{
-			devManager: manager.NewDeviceManager("virtio-scsi", nil),
+			devManager: manager.NewDeviceManager("virtio-scsi", false, "", nil),
 		},
 		devices: ctrDevices,
 	}
@@ -433,7 +509,55 @@ func TestAppendDevices(t *testing.T) {
 	c := &Container{
 		sandbox: &Sandbox{
-			devManager: manager.NewDeviceManager("virtio-blk", ctrDevices),
+			devManager: manager.NewDeviceManager("virtio-blk", false, "", ctrDevices),
+			config:     sandboxConfig,
+		},
+	}
+
+	c.devices = append(c.devices, ContainerDevice{
+		ID:            id,
+		ContainerPath: testBlockDeviceCtrPath,
+	})
+
+	devList := []*pb.Device{}
+	expected := []*pb.Device{
+		{
+			Type:          kataBlkDevType,
+			ContainerPath: testBlockDeviceCtrPath,
+			Id:            testPCIAddr,
+		},
+	}
+
+	updatedDevList := k.appendDevices(devList, c)
+	assert.True(t, reflect.DeepEqual(updatedDevList, expected),
+		"Device lists didn't match: got %+v, expecting %+v",
+		updatedDevList, expected)
+}
+
+func TestAppendVhostUserBlkDevices(t *testing.T) {
+	k := kataAgent{}
+
+	id := "test-append-vhost-user-blk"
+	ctrDevices := []api.Device{
+		&drivers.VhostUserBlkDevice{
+			GenericDevice: &drivers.GenericDevice{
+				ID: id,
+			},
+			VhostUserDeviceAttrs: &config.VhostUserDeviceAttrs{
+				Type:    config.VhostUserBlk,
+				PCIAddr: testPCIAddr,
+			},
+		},
+	}
+
+	sandboxConfig := &SandboxConfig{
+		HypervisorConfig: HypervisorConfig{
+			BlockDeviceDriver: config.VirtioBlock,
+		},
+	}
+
+	testVhostUserStorePath := "/test/vhost/user/store/path"
+	c := &Container{
+		sandbox: &Sandbox{
+			devManager: manager.NewDeviceManager("virtio-blk", true, testVhostUserStorePath, ctrDevices),
 			config:     sandboxConfig,
 		},
 	}