agent/block: Generate PCI path for virtio-blk devices on clh

Currently, the runtime and agent special-case virtio-blk devices under clh,
ostensibly because the PCI address information is not available in that
case.

In fact, cloud-hypervisor's VmAddDiskPut API does return a PciDeviceInfo,
which includes a PCI address.  That API is broken, because PCI addressing
depends on guest (firmware or OS) actions that the hypervisor won't know
about.  clh gets away with this only because it uses a single PCI root
and never uses PCI bridges, in which case the guest addresses are
reliably predictable: they always have domain and bus zero.

Until https://github.com/kata-containers/kata-containers/pull/1190, Kata
couldn't handle PCI addressing unless there was exactly one bridge, which
might be why this was special-cased for clh.

With #1190 merged, we can handle more general PCI paths, and we can derive
a trivial (one element) PCI path from the information that the clh API
gives us.  We can use that to remove this special case.
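
To make the derivation concrete, here is a small, self-contained sketch
(illustrative only — the helper name trivialPathFromBDF is hypothetical;
the real conversion is clhPciInfoToPath in the diff below):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // trivialPathFromBDF (hypothetical name) reduces a clh-reported guest
    // BDF to a one-element PCI path: with a single root and no bridges,
    // domain and bus are always zero, so the slot alone locates the device.
    func trivialPathFromBDF(bdf string) (string, error) {
    	tokens := strings.Split(bdf, ":")
    	if len(tokens) != 3 || tokens[0] != "0000" || tokens[1] != "00" {
    		return "", fmt.Errorf("unexpected BDF %q", bdf)
    	}
    	slotFn := strings.Split(tokens[2], ".")
    	if len(slotFn) != 2 || slotFn[1] != "0" {
    		return "", fmt.Errorf("unexpected BDF %q", bdf)
    	}
    	return slotFn[0], nil
    }

    func main() {
    	path, err := trivialPathFromBDF("0000:00:0a.0")
    	fmt.Println(path, err) // prints: 0a <nil>
    }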

fixes #1431

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>

@@ -332,14 +332,9 @@ async fn virtio_blk_device_handler(
     devidx: &DevIndex,
 ) -> Result<()> {
     let mut dev = device.clone();
 
-    // When "Id (PCI path)" is not set, we allow to use the predicted
-    // "VmPath" passed from kata-runtime Note this is a special code
-    // path for cloud-hypervisor when BDF information is not available
-    if !device.id.is_empty() {
-        let pcipath = pci::Path::from_str(&device.id)?;
-        dev.vm_path = get_virtio_blk_pci_device_name(sandbox, &pcipath).await?;
-    }
+    let pcipath = pci::Path::from_str(&device.id)?;
+    dev.vm_path = get_virtio_blk_pci_device_name(sandbox, &pcipath).await?;
 
     update_spec_device_list(&dev, spec, devidx)
 }

@@ -427,6 +427,27 @@ func clhDriveIndexToID(i int) string {
 	return "clh_drive_" + strconv.Itoa(i)
 }
 
+// Various cloud-hypervisor APIs report a PCI address in "dddd:bb:dd.f"
+// form within the PciDeviceInfo struct.  This is a broken API,
+// because there's no way clh can reliably know the guest-side BDF for
+// a device, since the bus number depends on how the guest firmware
+// and/or kernel enumerates it.  They get away with it only because
+// they don't use bridges, and so the bus is always 0.  Under that
+// assumption, convert a clh PciDeviceInfo into a PCI path.
+func clhPciInfoToPath(pciInfo chclient.PciDeviceInfo) (vcTypes.PciPath, error) {
+	tokens := strings.Split(pciInfo.Bdf, ":")
+	if len(tokens) != 3 || tokens[0] != "0000" || tokens[1] != "00" {
+		return vcTypes.PciPath{}, fmt.Errorf("Unexpected PCI address %q from clh hotplug", pciInfo.Bdf)
+	}
+
+	tokens = strings.Split(tokens[2], ".")
+	if len(tokens) != 2 || tokens[1] != "0" || len(tokens[0]) != 2 {
+		return vcTypes.PciPath{}, fmt.Errorf("Unexpected PCI address %q from clh hotplug", pciInfo.Bdf)
+	}
+
+	return vcTypes.PciPathFromString(tokens[0])
+}
+
 func (clh *cloudHypervisor) hotplugAddBlockDevice(drive *config.BlockDrive) error {
 	if clh.config.BlockDeviceDriver != config.VirtioBlock {
 		return fmt.Errorf("incorrect hypervisor configuration on 'block_device_driver':"+
@@ -441,24 +462,24 @@ func (clh *cloudHypervisor) hotplugAddBlockDevice(drive *config.BlockDrive) erro
 	driveID := clhDriveIndexToID(drive.Index)
 
-	//Explicitly set PCIPath to NULL, so that VirtPath can be used
-	drive.PCIPath = vcTypes.PciPath{}
-
 	if drive.Pmem {
-		err = fmt.Errorf("pmem device hotplug not supported")
-	} else {
-		blkDevice := chclient.DiskConfig{
-			Path:      drive.File,
-			Readonly:  drive.ReadOnly,
-			VhostUser: false,
-			Id:        driveID,
-		}
-		_, _, err = cl.VmAddDiskPut(ctx, blkDevice)
+		return fmt.Errorf("pmem device hotplug not supported")
 	}
 
+	blkDevice := chclient.DiskConfig{
+		Path:      drive.File,
+		Readonly:  drive.ReadOnly,
+		VhostUser: false,
+		Id:        driveID,
+	}
+	pciInfo, _, err := cl.VmAddDiskPut(ctx, blkDevice)
 	if err != nil {
-		err = fmt.Errorf("failed to hotplug block device %+v %s", drive, openAPIClientError(err))
+		return fmt.Errorf("failed to hotplug block device %+v %s", drive, openAPIClientError(err))
 	}
 
+	drive.PCIPath, err = clhPciInfoToPath(pciInfo)
+
 	return err
 }
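
As a usage sketch for the new helper (illustrative, not part of the
commit; the chclient import path and testify usage are assumed from
this tree's test conventions):

    package virtcontainers

    import (
    	"testing"

    	"github.com/stretchr/testify/assert"

    	chclient "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cloud-hypervisor/client"
    )

    // Expected behaviour of clhPciInfoToPath on root-bus and
    // non-root-bus addresses (sketch; assumes the layout above).
    func TestClhPciInfoToPathSketch(t *testing.T) {
    	assert := assert.New(t)

    	// Root bus, slot 0x0a, function 0 becomes the one-element path "0a".
    	path, err := clhPciInfoToPath(chclient.PciDeviceInfo{Bdf: "0000:00:0a.0"})
    	assert.NoError(err)
    	assert.Equal("0a", path.String())

    	// A non-zero bus could only come from a bridge, which clh never
    	// configures, so the helper rejects it rather than guess.
    	_, err = clhPciInfoToPath(chclient.PciDeviceInfo{Bdf: "0000:01:0a.0"})
    	assert.Error(err)
    }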

@@ -101,7 +101,7 @@ func (c *clhClientMock) VmAddDevicePut(ctx context.Context, vmAddDevice chclient
 //nolint:golint
 func (c *clhClientMock) VmAddDiskPut(ctx context.Context, diskConfig chclient.DiskConfig) (chclient.PciDeviceInfo, *http.Response, error) {
-	return chclient.PciDeviceInfo{}, nil, nil
+	return chclient.PciDeviceInfo{Bdf: "0000:00:0a.0"}, nil, nil
 }
 
 //nolint:golint

@@ -1228,12 +1228,7 @@ func (k *kataAgent) buildContainerRootfs(ctx context.Context, sandbox *Sandbox,
 		rootfs.Source = blockDrive.DevNo
 	case sandbox.config.HypervisorConfig.BlockDeviceDriver == config.VirtioBlock:
 		rootfs.Driver = kataBlkDevType
-		if blockDrive.PCIPath.IsNil() {
-			rootfs.Source = blockDrive.VirtPath
-		} else {
-			rootfs.Source = blockDrive.PCIPath.String()
-		}
+		rootfs.Source = blockDrive.PCIPath.String()
 	case sandbox.config.HypervisorConfig.BlockDeviceDriver == config.VirtioSCSI:
 		rootfs.Driver = kataSCSIDevType
 		rootfs.Source = blockDrive.SCSIAddr
@@ -1490,11 +1485,7 @@ func (k *kataAgent) handleDeviceBlockVolume(c *Container, m Mount, device api.De
 		vol.Source = blockDrive.DevNo
 	case c.sandbox.config.HypervisorConfig.BlockDeviceDriver == config.VirtioBlock:
 		vol.Driver = kataBlkDevType
-		if blockDrive.PCIPath.IsNil() {
-			vol.Source = blockDrive.VirtPath
-		} else {
-			vol.Source = blockDrive.PCIPath.String()
-		}
+		vol.Source = blockDrive.PCIPath.String()
 	case c.sandbox.config.HypervisorConfig.BlockDeviceDriver == config.VirtioMmio:
 		vol.Driver = kataMmioBlkDevType
 		vol.Source = blockDrive.VirtPath

@@ -281,18 +281,6 @@ func TestHandleDeviceBlockVolume(t *testing.T) {
 				Source: testPCIPath.String(),
 			},
 		},
-		{
-			BlockDeviceDriver: config.VirtioBlock,
-			inputDev: &drivers.BlockDevice{
-				BlockDrive: &config.BlockDrive{
-					VirtPath: testVirtPath,
-				},
-			},
-			resultVol: &pb.Storage{
-				Driver: kataBlkDevType,
-				Source: testVirtPath,
-			},
-		},
 		{
 			BlockDeviceDriver: config.VirtioMmio,
 			inputDev: &drivers.BlockDevice{