Merge pull request #1432 from dgibson/bug1431

block: Generate PCI path for virtio-blk devices on clh

commit cd27308755
@@ -332,14 +332,9 @@ async fn virtio_blk_device_handler(
     devidx: &DevIndex,
 ) -> Result<()> {
     let mut dev = device.clone();
 
-    // When "Id (PCI path)" is not set, we allow to use the predicted
-    // "VmPath" passed from kata-runtime Note this is a special code
-    // path for cloud-hypervisor when BDF information is not available
-    if !device.id.is_empty() {
-        let pcipath = pci::Path::from_str(&device.id)?;
-        dev.vm_path = get_virtio_blk_pci_device_name(sandbox, &pcipath).await?;
-    }
+    let pcipath = pci::Path::from_str(&device.id)?;
+    dev.vm_path = get_virtio_blk_pci_device_name(sandbox, &pcipath).await?;
 
     update_spec_device_list(&dev, spec, devidx)
 }
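With the special case removed, the agent always expects device.id to carry a PCI path and resolves it to the guest block device via get_virtio_blk_pci_device_name(). As a rough illustration of what that resolution amounts to for a bridge-less slot, here is a small self-contained Go sketch; the helper name virtioBlkDevFromSlot and the sysfs glob pattern are illustrative assumptions (the common Linux layout for virtio-blk PCI devices), not the actual agent code, which is Rust and more general:

// Rough illustration only: resolve a bus-0 PCI slot (e.g. "0a") to a
// virtio-blk device node by walking sysfs. Assumes the usual layout
// /sys/bus/pci/devices/0000:00:<slot>.0/virtio*/block/<name>.
package main

import (
    "fmt"
    "path/filepath"
)

func virtioBlkDevFromSlot(slot string) (string, error) {
    pattern := fmt.Sprintf("/sys/bus/pci/devices/0000:00:%s.0/virtio*/block/*", slot)
    matches, err := filepath.Glob(pattern)
    if err != nil {
        return "", err
    }
    if len(matches) != 1 {
        return "", fmt.Errorf("expected exactly one block device for slot %s, got %d", slot, len(matches))
    }
    // The block subdirectory name is the device name, e.g. "vdb".
    return "/dev/" + filepath.Base(matches[0]), nil
}

func main() {
    dev, err := virtioBlkDevFromSlot("0a")
    fmt.Println(dev, err)
}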
@@ -427,6 +427,27 @@ func clhDriveIndexToID(i int) string {
     return "clh_drive_" + strconv.Itoa(i)
 }
 
+// Various cloud-hypervisor APIs report a PCI address in "BB:DD.F"
+// form within the PciDeviceInfo struct. This is a broken API,
+// because there's no way clh can reliably know the guest side bdf for
+// a device, since the bus number depends on how the guest firmware
+// and/or kernel enumerates it. They get away with it only because
+// they don't use bridges, and so the bus is always 0. Under that
+// assumption convert a clh PciDeviceInfo into a PCI path
+func clhPciInfoToPath(pciInfo chclient.PciDeviceInfo) (vcTypes.PciPath, error) {
+    tokens := strings.Split(pciInfo.Bdf, ":")
+    if len(tokens) != 3 || tokens[0] != "0000" || tokens[1] != "00" {
+        return vcTypes.PciPath{}, fmt.Errorf("Unexpected PCI address %q from clh hotplug", pciInfo.Bdf)
+    }
+
+    tokens = strings.Split(tokens[2], ".")
+    if len(tokens) != 2 || tokens[1] != "0" || len(tokens[0]) != 2 {
+        return vcTypes.PciPath{}, fmt.Errorf("Unexpected PCI address %q from clh hotplug", pciInfo.Bdf)
+    }
+
+    return vcTypes.PciPathFromString(tokens[0])
+}
+
 func (clh *cloudHypervisor) hotplugAddBlockDevice(drive *config.BlockDrive) error {
     if clh.config.BlockDeviceDriver != config.VirtioBlock {
         return fmt.Errorf("incorrect hypervisor configuration on 'block_device_driver':"+
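The parsing done by clhPciInfoToPath() is easy to demonstrate in isolation. Below is a self-contained sketch (the function bdfToSlot and its plain-string return are illustrative stand-ins for the vcTypes.PciPath-based code above) showing how a clh-reported BDF such as "0000:00:0a.0" reduces to the single slot "0a" under the bus-0 assumption:

// Standalone sketch, not the repository code: mirrors the parsing that
// clhPciInfoToPath performs on a cloud-hypervisor "DDDD:BB:SS.F" string,
// keeping only the slot when domain is 0000, bus is 00 and function is 0.
package main

import (
    "fmt"
    "strings"
)

func bdfToSlot(bdf string) (string, error) {
    tokens := strings.Split(bdf, ":")
    if len(tokens) != 3 || tokens[0] != "0000" || tokens[1] != "00" {
        return "", fmt.Errorf("unexpected PCI address %q", bdf)
    }

    tokens = strings.Split(tokens[2], ".")
    if len(tokens) != 2 || tokens[1] != "0" || len(tokens[0]) != 2 {
        return "", fmt.Errorf("unexpected PCI address %q", bdf)
    }

    return tokens[0], nil // e.g. "0a"
}

func main() {
    slot, err := bdfToSlot("0000:00:0a.0")
    fmt.Println(slot, err) // prints: 0a <nil>
}

A single two-hex-digit slot is exactly what a one-element PCI path looks like, which is why the real function can hand tokens[0] straight to vcTypes.PciPathFromString().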
@@ -441,24 +462,24 @@ func (clh *cloudHypervisor) hotplugAddBlockDevice(drive *config.BlockDrive) erro
 
     driveID := clhDriveIndexToID(drive.Index)
 
-    //Explicitly set PCIPath to NULL, so that VirtPath can be used
-    drive.PCIPath = vcTypes.PciPath{}
-
     if drive.Pmem {
-        err = fmt.Errorf("pmem device hotplug not supported")
-    } else {
-        blkDevice := chclient.DiskConfig{
-            Path:      drive.File,
-            Readonly:  drive.ReadOnly,
-            VhostUser: false,
-            Id:        driveID,
-        }
-        _, _, err = cl.VmAddDiskPut(ctx, blkDevice)
+        return fmt.Errorf("pmem device hotplug not supported")
     }
 
+    blkDevice := chclient.DiskConfig{
+        Path:      drive.File,
+        Readonly:  drive.ReadOnly,
+        VhostUser: false,
+        Id:        driveID,
+    }
+    pciInfo, _, err := cl.VmAddDiskPut(ctx, blkDevice)
+
     if err != nil {
-        err = fmt.Errorf("failed to hotplug block device %+v %s", drive, openAPIClientError(err))
+        return fmt.Errorf("failed to hotplug block device %+v %s", drive, openAPIClientError(err))
     }
 
+    drive.PCIPath, err = clhPciInfoToPath(pciInfo)
+
     return err
 }
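Note on the rewritten hotplug path: the pmem and API-error cases now return immediately instead of threading a shared err through an if/else, and the slot reported back by cloud-hypervisor is stored in drive.PCIPath rather than being cleared up front. That stored path is what allows the agent-side change above and the kata_agent.go hunks below to drop the VirtPath fallback.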
@@ -101,7 +101,7 @@ func (c *clhClientMock) VmAddDevicePut(ctx context.Context, vmAddDevice chclient
 
 //nolint:golint
 func (c *clhClientMock) VmAddDiskPut(ctx context.Context, diskConfig chclient.DiskConfig) (chclient.PciDeviceInfo, *http.Response, error) {
-    return chclient.PciDeviceInfo{}, nil, nil
+    return chclient.PciDeviceInfo{Bdf: "0000:00:0a.0"}, nil, nil
 }
 
 //nolint:golint
@@ -1228,12 +1228,7 @@ func (k *kataAgent) buildContainerRootfs(ctx context.Context, sandbox *Sandbox,
         rootfs.Source = blockDrive.DevNo
     case sandbox.config.HypervisorConfig.BlockDeviceDriver == config.VirtioBlock:
         rootfs.Driver = kataBlkDevType
-        if blockDrive.PCIPath.IsNil() {
-            rootfs.Source = blockDrive.VirtPath
-        } else {
-            rootfs.Source = blockDrive.PCIPath.String()
-        }
-
+        rootfs.Source = blockDrive.PCIPath.String()
     case sandbox.config.HypervisorConfig.BlockDeviceDriver == config.VirtioSCSI:
         rootfs.Driver = kataSCSIDevType
         rootfs.Source = blockDrive.SCSIAddr
@@ -1490,11 +1485,7 @@ func (k *kataAgent) handleDeviceBlockVolume(c *Container, m Mount, device api.De
         vol.Source = blockDrive.DevNo
     case c.sandbox.config.HypervisorConfig.BlockDeviceDriver == config.VirtioBlock:
         vol.Driver = kataBlkDevType
-        if blockDrive.PCIPath.IsNil() {
-            vol.Source = blockDrive.VirtPath
-        } else {
-            vol.Source = blockDrive.PCIPath.String()
-        }
+        vol.Source = blockDrive.PCIPath.String()
     case c.sandbox.config.HypervisorConfig.BlockDeviceDriver == config.VirtioMmio:
         vol.Driver = kataMmioBlkDevType
         vol.Source = blockDrive.VirtPath
@@ -281,18 +281,6 @@ func TestHandleDeviceBlockVolume(t *testing.T) {
                 Source: testPCIPath.String(),
             },
         },
-        {
-            BlockDeviceDriver: config.VirtioBlock,
-            inputDev: &drivers.BlockDevice{
-                BlockDrive: &config.BlockDrive{
-                    VirtPath: testVirtPath,
-                },
-            },
-            resultVol: &pb.Storage{
-                Driver: kataBlkDevType,
-                Source: testVirtPath,
-            },
-        },
         {
             BlockDeviceDriver: config.VirtioMmio,
             inputDev: &drivers.BlockDevice{