hv: vpci: handle the quirk part for pass through pci device cfg access in dm

There are some PCI devices that need special handling for vendor-specific features or
capability CFG access. The Intel GPU is one of them. In order to keep the ACRN-HV
clean, we want to throw the quirk part of PCI CFG access to DM to handle.

To achieve this, we implement a per-device policy based on whether it needs a quirk
handler for a VM: each device could be configured as a "quirk pass through device" or not.
For a "quirk pass through device", we will handle the general part in HV and the quirk
part in DM. For a non "quirk pass through device", we will handle all the parts in HV.

Tracked-On: #4371
Signed-off-by: Li Fei1 <fei1.li@intel.com>
This commit is contained in:
Li Fei1 2020-03-13 11:44:51 +08:00 committed by wenlingz
parent e5c7a96513
commit e99ddf28c3
7 changed files with 75 additions and 36 deletions

View File

@ -123,6 +123,28 @@ read_config(struct pci_device *phys_dev, long reg, int width)
return temp;
}
static int
write_config(struct pci_device *phys_dev, long reg, int width, uint32_t data)
{
	/* Write one config-space register of the physical device via
	 * libpciaccess, choosing the access routine by register width.
	 * Returns the libpciaccess status, or -1 for an unsupported width.
	 */
	int error = -1;

	if (width == 1)
		error = pci_device_cfg_write_u8(phys_dev, data, reg);
	else if (width == 2)
		error = pci_device_cfg_write_u16(phys_dev, data, reg);
	else if (width == 4)
		error = pci_device_cfg_write_u32(phys_dev, data, reg);
	else
		warnx("%s: invalid reg width", __func__);

	return error;
}
static int
cfginit_cap(struct vmctx *ctx, struct passthru_dev *ptdev)
{
@ -515,8 +537,10 @@ passthru_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
/* initialize the EPT mapping for passthrough GPU opregion */
vm_map_ptdev_mmio(ctx, 0, 2, 0, GPU_OPREGION_GPA, GPU_OPREGION_SIZE, opregion_start_hpa);
pcidev.rsvd2[0] = GPU_GSM_GPA | (gsm_phys & ~PCIM_BDSM_GSM_MASK) ;
pcidev.rsvd2[1] = GPU_OPREGION_GPA | (opregion_phys & ~PCIM_ASLS_OPREGION_MASK);
pci_set_cfgdata32(dev, PCIR_BDSM, GPU_GSM_GPA | (gsm_phys & ~PCIM_BDSM_GSM_MASK));
pci_set_cfgdata32(dev, PCIR_ASLS_CTL, GPU_OPREGION_GPA | (opregion_phys & ~PCIM_ASLS_OPREGION_MASK));
pcidev.type = QUIRK_PTDEV;
}
pcidev.virt_bdf = PCI_BDF(dev->bus, dev->slot, dev->func);
@ -628,13 +652,27 @@ static int
/* Config-space read handler for a passthrough PCI device.
 * NOTE(review): this span comes from a diff rendering - "return ~0;" below is
 * the removed pre-change stub; the lines after it are the added replacement.
 */
passthru_cfgread(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
int coff, int bytes, uint32_t *rv)
{
return ~0; /* old (removed) stub body, retained only by the diff view */
struct passthru_dev *ptdev = dev->arg;
/* For the GPU quirk registers (BDSM: stolen-memory base, ASLS_CTL: opregion
 * address), return the virtualized value cached in vdev config space, which
 * was populated at init time; all other offsets are read straight from the
 * physical device.
 */
if ((PCI_BDF(dev->bus, dev->slot, dev->func) == PCI_BDF_GPU) &&
((coff == PCIR_BDSM) || (coff == PCIR_ASLS_CTL)))
*rv = pci_get_cfgdata32(dev, coff);
else
*rv = read_config(ptdev->phys_dev, coff, bytes);
return 0;
}
static int
passthru_cfgwrite(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
int coff, int bytes, uint32_t val)
{
	/* Config-space write handler for a passthrough PCI device.
	 * Writes to the GPU quirk registers (BDSM / ASLS_CTL) are not
	 * forwarded to the physical device; everything else passes through.
	 */
	struct passthru_dev *ptdev = dev->arg;
	int is_gpu = (PCI_BDF(dev->bus, dev->slot, dev->func) == PCI_BDF_GPU);
	int is_quirk_reg = ((coff == PCIR_BDSM) || (coff == PCIR_ASLS_CTL));

	if (!(is_gpu && is_quirk_reg))
		write_config(ptdev->phys_dev, coff, bytes, val);

	return 0;
}

View File

@ -154,8 +154,9 @@ struct vm_memmap {
*
*/
struct acrn_assign_pcidev {
/** reversed for externed compatibility */
uint32_t rsvd1;
#define QUIRK_PTDEV (1 << 0) /* We will only handle general part in HV, others in DM */
/** the type of the pass-through PCI device */
uint32_t type;
/** virtual BDF# of the pass-through PCI device */
uint16_t virt_bdf;

View File

@ -82,11 +82,15 @@ static bool vpci_pio_cfgaddr_write(struct acrn_vcpu *vcpu, uint16_t addr, size_t
cfg_addr->value = val & (~0x7f000003U);
if (is_postlaunched_vm(vcpu->vm)) {
const struct pci_vdev *vdev;
vbdf.value = cfg_addr->bits.bdf;
/* For post-launched VM, ACRN will only handle PT device, all virtual PCI device
vdev = find_available_vdev(vpci, vbdf);
/* For post-launched VM, ACRN HV will only handle PT devices;
 * all virtual PCI devices and QUIRK PT devices
 * still need to be delivered to ACRN DM to handle.
 */
if (find_available_vdev(vpci, vbdf) == NULL) {
if ((vdev == NULL) || is_quirk_ptdev(vdev)) {
ret = false;
}
}
@ -482,6 +486,8 @@ static void write_cfg_header(struct pci_vdev *vdev,
static int32_t write_pt_dev_cfg(struct pci_vdev *vdev, uint32_t offset,
uint32_t bytes, uint32_t val)
{
int32_t ret = 0;
if (cfg_header_access(offset)) {
write_cfg_header(vdev, offset, bytes, val);
} else if (msicap_access(vdev, offset)) {
@ -491,20 +497,22 @@ static int32_t write_pt_dev_cfg(struct pci_vdev *vdev, uint32_t offset,
} else if (sriovcap_access(vdev, offset)) {
write_sriov_cap_reg(vdev, offset, bytes, val);
} else {
/* For GVT-D, prevent stolen memory and opregion memory write */
if (!(is_postlaunched_vm(vdev->vpci->vm) && is_gvtd(vdev->pdev->bdf) &&
((offset == PCIR_BDSM) || (offset == PCIR_ASLS_CTL)))) {
if (!is_quirk_ptdev(vdev)) {
/* passthru to physical device */
pci_pdev_write_cfg(vdev->pdev->bdf, offset, bytes, val);
} else {
ret = -ENODEV;
}
}
return 0;
return ret;
}
static int32_t read_pt_dev_cfg(const struct pci_vdev *vdev, uint32_t offset,
uint32_t bytes, uint32_t *val)
{
int32_t ret = 0;
if (cfg_header_access(offset)) {
read_cfg_header(vdev, offset, bytes, val);
} else if (msicap_access(vdev, offset)) {
@ -514,17 +522,15 @@ static int32_t read_pt_dev_cfg(const struct pci_vdev *vdev, uint32_t offset,
} else if (sriovcap_access(vdev, offset)) {
read_sriov_cap_reg(vdev, offset, bytes, val);
} else {
/* For GVT-D, just return GPA for stolen memory and opregion memory read. */
if (is_postlaunched_vm(vdev->vpci->vm) && is_gvtd(vdev->pdev->bdf) &&
((offset == PCIR_BDSM) || (offset == PCIR_ASLS_CTL))) {
*val = pci_vdev_read_vcfg(vdev, offset, bytes);
} else {
if (!is_quirk_ptdev(vdev)) {
/* passthru to physical device */
*val = pci_pdev_read_cfg(vdev->pdev->bdf, offset, bytes);
} else {
ret = -ENODEV;
}
}
return 0;
return ret;
}
static const struct pci_vdev_ops pci_pt_dev_ops = {
@ -546,7 +552,7 @@ static int32_t vpci_read_cfg(struct acrn_vpci *vpci, union pci_bdf bdf,
spinlock_obtain(&vpci->lock);
vdev = find_available_vdev(vpci, bdf);
if (vdev != NULL) {
vdev->vdev_ops->read_vdev_cfg(vdev, offset, bytes, val);
ret = vdev->vdev_ops->read_vdev_cfg(vdev, offset, bytes, val);
} else {
if (is_postlaunched_vm(vpci->vm)) {
ret = -ENODEV;
@ -568,7 +574,7 @@ static int32_t vpci_write_cfg(struct acrn_vpci *vpci, union pci_bdf bdf,
spinlock_obtain(&vpci->lock);
vdev = find_available_vdev(vpci, bdf);
if (vdev != NULL) {
vdev->vdev_ops->write_vdev_cfg(vdev, offset, bytes, val);
ret = vdev->vdev_ops->write_vdev_cfg(vdev, offset, bytes, val);
} else {
if (!is_postlaunched_vm(vpci->vm)) {
pr_acrnlog("%s %x:%x.%x not found! off: 0x%x, val: 0x%x\n", __func__,
@ -771,12 +777,7 @@ int32_t vpci_assign_pcidev(struct acrn_vm *tgt_vm, struct acrn_assign_pcidev *pc
pci_vdev_write_vbar(vdev, idx, pcidev->bar[idx]);
}
if (is_gvtd(bdf)) {
/* rsvd2[0U] for stolen memory GPA; rsvd2[1U] for opregion memory GPA */
pci_vdev_write_vcfg(vdev, PCIR_BDSM, 4U, pcidev->rsvd2[0U]);
pci_vdev_write_vcfg(vdev, PCIR_ASLS_CTL, 4U, pcidev->rsvd2[1U]);
}
vdev->flags |= pcidev->type;
vdev->bdf.value = pcidev->virt_bdf;
spinlock_release(&tgt_vm->vpci.lock);
vdev_in_sos->new_owner = vdev;

View File

@ -32,6 +32,11 @@
#include <pci.h>
static inline bool is_quirk_ptdev(const struct pci_vdev *vdev)
{
	/* A "quirk" passthrough device has the QUIRK_PTDEV bit set in its
	 * flags; since QUIRK_PTDEV is a single bit, testing for exact
	 * equality of the masked value is equivalent to a non-zero check. */
	return ((vdev->flags & QUIRK_PTDEV) == QUIRK_PTDEV);
}
static inline bool in_range(uint32_t value, uint32_t lower, uint32_t len)
{
return ((value >= lower) && (value < (lower + len)));

View File

@ -104,6 +104,8 @@ struct pci_vdev {
union pci_cfgdata cfgdata;
uint32_t flags;
/* The bar info of the virtual PCI device. */
uint32_t nr_bars; /* 6 for normal device, 2 for bridge, 1 for cardbus */
struct pci_vbar vbars[PCI_BAR_COUNT];

View File

@ -186,10 +186,6 @@
#define HOST_BRIDGE_BDF 0U
#define PCI_STD_NUM_BARS 6U
/* Graphics definitions */
#define PCIR_BDSM 0x5CU /* BDSM graphics base data of stolen memory register */
#define PCIR_ASLS_CTL 0xFCU /* Opregion start addr register */
union pci_bdf {
uint16_t value;
struct {
@ -255,11 +251,6 @@ struct pci_cfg_ops {
void (*pci_write_cfg)(union pci_bdf bdf, uint32_t offset, uint32_t bytes, uint32_t val);
};
static inline bool is_gvtd(union pci_bdf bdf)
{
return (bdf.value == CONFIG_GPU_SBDF);
}
static inline uint32_t pci_bar_offset(uint32_t idx)
{
return PCIR_BARS + (idx << 2U);

View File

@ -272,8 +272,9 @@ struct hc_ptdev_irq {
* the parameter for HC_ASSIGN_PCIDEV or HC_DEASSIGN_PCIDEV hypercall
*/
struct acrn_assign_pcidev {
/** reversed for externed compatibility */
uint32_t rsvd1;
#define QUIRK_PTDEV (1U << 0) /* We will only handle general part in HV, others in DM */
/** the type of the pass-through PCI device */
uint32_t type;
/** virtual BDF# of the pass-through PCI device */
uint16_t virt_bdf;