mirror of https://github.com/projectacrn/acrn-hypervisor.git
vPCI: split passthrough PCI device from DM to HV
With this change, all passthrough PCI devices can be handled in the ACRN hypervisor. We still need the DM to initialize BAR resources and INTx for passthrough PCI devices of post-launched VMs, since this information must be filled into the ACPI tables. So:

1. Add a hypercall vm_assign_pcidev that passes this extra information, replacing the old vm_assign_ptdev.
2. Remove the hypercall vm_set_ptdev_msix_info, since the post-launched VM can now set this itself, the same as the SOS.
3. Remove the vm_map_ptdev_mmio calls for passthrough devices in the DM, since the ACRN hypervisor now handles these BAR accesses.
4. Most importantly, trap PCI configuration space accesses for passthrough devices in the HV for post-launched VMs, while forwarding virtual PCI device configuration space accesses to the DM.

This patch doesn't do the cleanup; that will be done in the next patch.

Tracked-On: #4371
Signed-off-by: Li Fei1 <fei1.li@intel.com>
This commit is contained in:
parent aa38ed5b69
commit dafa3da693
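The heart of the DM-side change is that passthru_init() now collects everything the hypervisor needs into a single struct acrn_assign_pcidev and makes one hypercall. Below is a condensed sketch of that flow, assembled from the passthrough.c hunks in this diff; the wrapper function itself is illustrative and not a function in the tree, and error handling plus the capability/BAR setup are elided.

```c
/* Illustrative condensation of the new passthru_init() flow shown in the
 * diff below: the old vm_assign_ptdev() + vm_map_ptdev_mmio() +
 * vm_set_ptdev_msix_info() sequence collapses into filling one
 * acrn_assign_pcidev and issuing vm_assign_pcidev().
 */
static int assign_to_hv(struct vmctx *ctx, struct pci_vdev *dev,
			struct passthru_dev *ptdev)
{
	struct acrn_assign_pcidev pcidev = {};
	int idx;

	pcidev.virt_bdf = PCI_BDF(dev->bus, dev->slot, dev->func);
	pcidev.phys_bdf = ptdev->phys_bdf;

	/* Virtual BARs were already initialized in the DM's config space
	 * (cfginitbar); the HV now traps and emulates the BAR registers
	 * itself, so no vm_map_ptdev_mmio() calls are needed for them. */
	for (idx = 0; idx <= PCI_BARMAX; idx++)
		pcidev.bar[idx] = pci_get_cfgdata32(dev, PCIR_BAR(idx));

	/* INTx routing comes from the DM-built config space, which is also
	 * what the DM writes into the guest's ACPI tables. */
	pcidev.intr_line = pci_get_cfgdata8(dev, PCIR_INTLINE);
	pcidev.intr_pin = pci_get_cfgdata8(dev, PCIR_INTPIN);

	return vm_assign_pcidev(ctx, &pcidev);
}
```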
@@ -227,6 +227,15 @@ is_pci_gvt(struct pci_vdev *dev)
 		return 1;
 }
 
+static inline int
+is_pt_pci(struct pci_vdev *dev)
+{
+	if (dev == NULL || strncmp(dev->dev_ops->class_name, "passthru",8))
+		return 0;
+	else
+		return 1;
+}
+
 /*
  * I/O access
  */
@@ -602,6 +611,11 @@ modify_bar_registration(struct pci_vdev *dev, int idx, int registration)
 	struct inout_port iop;
 	struct mem_range mr;
 
+	if (is_pt_pci(dev)) {
+		printf("%s: bypass for pci-passthru %x:%x.%x\n", __func__, dev->bus, dev->slot, dev->func);
+		return 0;
+	}
+
 	switch (dev->bar[idx].type) {
 	case PCIBAR_IO:
 		bzero(&iop, sizeof(struct inout_port));
@@ -211,16 +211,8 @@ cfginit_cap(struct vmctx *ctx, struct passthru_dev *ptdev)
 {
 	int ptr, capptr, cap, sts, caplen;
 	uint32_t u32;
-	struct pci_vdev *dev;
+	struct pci_vdev *dev = ptdev->dev;
+	struct pci_device *phys_dev = ptdev->phys_dev;
-	uint16_t virt_bdf = PCI_BDF(ptdev->dev->bus,
-				    ptdev->dev->slot,
-				    ptdev->dev->func);
-	uint32_t pba_info;
-	uint32_t table_info;
-	uint16_t msgctrl;
-
-	dev = ptdev->dev;
 
 	/*
 	 * Parse the capabilities and cache the location of the MSI
@@ -289,35 +281,6 @@ cfginit_cap(struct vmctx *ctx, struct passthru_dev *ptdev)
 		}
 	}
 
-	dev->msix.table_bar = -1;
-	dev->msix.pba_bar = -1;
-	if (ptdev->msix.capoff != 0) {
-		capptr = ptdev->msix.capoff;
-
-		pba_info = pci_get_cfgdata32(dev, capptr + 8);
-		dev->msix.pba_bar = pba_info & PCIM_MSIX_BIR_MASK;
-		dev->msix.pba_offset = pba_info & ~PCIM_MSIX_BIR_MASK;
-
-		table_info = pci_get_cfgdata32(dev, capptr + 4);
-		dev->msix.table_bar = table_info & PCIM_MSIX_BIR_MASK;
-		dev->msix.table_offset = table_info & ~PCIM_MSIX_BIR_MASK;
-
-		msgctrl = pci_get_cfgdata16(dev, capptr + 2);
-		dev->msix.table_count = MSIX_TABLE_COUNT(msgctrl);
-		dev->msix.pba_size = PBA_SIZE(dev->msix.table_count);
-	} else if (ptdev->msi.capoff != 0) {
-		struct ic_ptdev_irq ptirq;
-
-		ptirq.type = IRQ_MSI;
-		ptirq.virt_bdf = virt_bdf;
-		ptirq.phys_bdf = ptdev->phys_bdf;
-		/* currently, only support one vector for MSI */
-		ptirq.msix.vector_cnt = 1;
-		ptirq.msix.table_paddr = 0;
-		ptirq.msix.table_size = 0;
-		vm_set_ptdev_msix_info(ctx, &ptirq);
-	}
-
 	return 0;
 }
@@ -622,22 +585,6 @@ cfginitbar(struct vmctx *ctx, struct passthru_dev *ptdev)
 			vbar_lo32 &= ~PCIM_BAR_MEM_PREFETCH;
 
 			pci_set_cfgdata32(dev, PCIR_BAR(i), vbar_lo32);
 		}
 
-		/* The MSI-X table needs special handling */
-		if (i == ptdev_msix_table_bar(ptdev)) {
-			error = init_msix_table(ctx, ptdev, base);
-			if (error) {
-				deinit_msix_table(ctx, ptdev);
-				return -1;
-			}
-		} else if (bartype != PCIBAR_IO) {
-			/* Map the physical BAR in the guest MMIO space */
-			error = vm_map_ptdev_mmio(ctx, ptdev->sel.bus,
-				ptdev->sel.dev, ptdev->sel.func,
-				dev->bar[i].addr, dev->bar[i].size, base);
-			if (error)
-				return -1;
-		}
-
 		/*
@@ -781,13 +728,14 @@ pciaccess_init(void)
 static int
 passthru_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
 {
-	int bus, slot, func, error;
+	int bus, slot, func, idx, error;
 	struct passthru_dev *ptdev;
 	struct pci_device_iterator *iter;
 	struct pci_device *phys_dev;
 	char *opt;
 	bool keep_gsi = false;
 	bool need_reset = true;
+	struct acrn_assign_pcidev pcidev = {};
 
 	ptdev = NULL;
 	error = -EINVAL;
@@ -816,12 +764,6 @@ passthru_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
 		warnx("Invalid passthru options:%s", opt);
 	}
 
-	if (vm_assign_ptdev(ctx, bus, slot, func) != 0) {
-		warnx("PCI device at %x/%x/%x is not using the pt(4) driver",
-			bus, slot, func);
-		goto done;
-	}
-
 	ptdev = calloc(1, sizeof(struct passthru_dev));
 	if (ptdev == NULL) {
 		warnx("%s: calloc FAIL!", __func__);
@@ -894,31 +836,36 @@ passthru_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
 		vm_map_ptdev_mmio(ctx, 0, 2, 0, GPU_OPREGION_GPA, GPU_OPREGION_SIZE, opregion_start_hpa);
 	}
 
+	pcidev.virt_bdf = PCI_BDF(dev->bus, dev->slot, dev->func);
+	pcidev.phys_bdf = ptdev->phys_bdf;
+	for (idx = 0; idx <= PCI_BARMAX; idx++) {
+		pcidev.bar[idx] = pci_get_cfgdata32(dev, PCIR_BAR(idx));
+	}
+
 	/* If ptdev support MSI/MSIX, stop here to skip virtual INTx setup.
 	 * Forge Guest to use MSI/MSIX in this case to mitigate IRQ sharing
 	 * issue
 	 */
-	if (error == IRQ_MSI && !keep_gsi)
-		return 0;
+	if (error != IRQ_MSI && !keep_gsi) {
+		/* Allocates the virq if ptdev only support INTx */
+		pci_lintr_request(dev);
 
-	/* Allocates the virq if ptdev only support INTx */
-	pci_lintr_request(dev);
+		ptdev->phys_pin = read_config(ptdev->phys_dev, PCIR_INTLINE, 1);
 
-	ptdev->phys_pin = read_config(ptdev->phys_dev, PCIR_INTLINE, 1);
-
-	if (ptdev->phys_pin == -1 || ptdev->phys_pin > 256) {
-		warnx("ptdev %x/%x/%x has wrong phys_pin %d, likely fail!",
-			bus, slot, func, ptdev->phys_pin);
-		goto done;
+		if (ptdev->phys_pin == -1 || ptdev->phys_pin > 256) {
+			warnx("ptdev %x/%x/%x has wrong phys_pin %d, likely fail!",
+				bus, slot, func, ptdev->phys_pin);
+			error = -1;
+			goto done;
+		}
 	}
 
-	error = 0;	/* success */
+	pcidev.intr_line = pci_get_cfgdata8(dev, PCIR_INTLINE);
+	pcidev.intr_pin = pci_get_cfgdata8(dev, PCIR_INTPIN);
+	error = vm_assign_pcidev(ctx, &pcidev);
 done:
-	if (error) {
-		if (ptdev != NULL) {
-			free(ptdev);
-		}
-		vm_unassign_ptdev(ctx, bus, slot, func);
+	if (error && (ptdev != NULL)) {
+		free(ptdev);
 	}
 	return error;
 }
@@ -937,9 +884,8 @@ static void
 passthru_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
 {
 	struct passthru_dev *ptdev;
-	uint8_t bus, slot, func;
 	uint16_t virt_bdf = PCI_BDF(dev->bus, dev->slot, dev->func);
-	int i;
+	struct acrn_assign_pcidev pcidev = {};
 
 	if (!dev->arg) {
 		warnx("%s: passthru_dev is NULL", __func__);
@@ -947,46 +893,23 @@ passthru_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
 	}
 
 	ptdev = (struct passthru_dev *) dev->arg;
-	bus = (ptdev->phys_bdf >> 8) & 0xff;
-	slot = (ptdev->phys_bdf & 0xff) >> 3;
-	func = ptdev->phys_bdf & 0x7;
-
-	if (ptdev->msix.capoff != 0)
-		deinit_msix_table(ctx, ptdev);
-	else if(ptdev->msi.capoff != 0) {
-		/* Currently only support 1 vector */
-		vm_reset_ptdev_msix_info(ctx, virt_bdf, ptdev->phys_bdf, 1);
-	}
 
 	pr_info("vm_reset_ptdev_intx:0x%x-%x, ioapic virpin=%d.\n",
 		virt_bdf, ptdev->phys_bdf, dev->lintr.ioapic_irq);
 
 	if (dev->lintr.pin != 0) {
 		vm_reset_ptdev_intx_info(ctx, virt_bdf, ptdev->phys_bdf, dev->lintr.ioapic_irq, false);
 	}
 
-	/* unmap the physical BAR in guest MMIO space */
-	for (i = 0; i <= PCI_BARMAX; i++) {
-
-		if (ptdev->bar[i].size == 0 ||
-			i == ptdev_msix_table_bar(ptdev) ||
-			ptdev->bar[i].type == PCIBAR_IO)
-			continue;
-
-		vm_unmap_ptdev_mmio(ctx, ptdev->sel.bus,
-				ptdev->sel.dev, ptdev->sel.func,
-				dev->bar[i].addr, ptdev->bar[i].size,
-				ptdev->bar[i].addr);
-	}
-
 	if (ptdev->phys_bdf == PCI_BDF_GPU) {
 		vm_unmap_ptdev_mmio(ctx, 0, 2, 0, GPU_GSM_GPA, GPU_GSM_SIZE, gsm_start_hpa);
 		vm_unmap_ptdev_mmio(ctx, 0, 2, 0, GPU_OPREGION_GPA, GPU_OPREGION_SIZE, opregion_start_hpa);
 	}
 
+	pcidev.virt_bdf = PCI_BDF(dev->bus, dev->slot, dev->func);
+	pcidev.phys_bdf = ptdev->phys_bdf;
 	pciaccess_cleanup();
 	free(ptdev);
-	vm_unassign_ptdev(ctx, bus, slot, func);
+	vm_deassign_pcidev(ctx, &pcidev);
 }
 
 static void
@@ -619,18 +619,9 @@ int32_t ptirq_prepare_msix_remap(struct acrn_vm *vm, uint16_t virt_bdf, uint16_t
 	spinlock_obtain(&ptdev_lock);
 	entry = ptirq_lookup_entry_by_sid(PTDEV_INTR_MSI, &virt_sid, vm);
 	if (entry == NULL) {
-		/* SOS_VM we add mapping dynamically */
-		if (is_sos_vm(vm) || is_prelaunched_vm(vm)) {
-			entry = add_msix_remapping(vm, virt_bdf, phys_bdf, entry_nr);
-			if (entry == NULL) {
-				pr_err("dev-assign: msi entry exist in others");
-			}
-		} else {
-			/* ptirq_prepare_msix_remap is called by SOS on demand, if
-			 * failed to find pre-hold mapping, return error to
-			 * the caller.
-			 */
-			pr_err("dev-assign: msi entry not exist");
+		entry = add_msix_remapping(vm, virt_bdf, phys_bdf, entry_nr);
+		if (entry == NULL) {
+			pr_err("dev-assign: msi entry exist in others");
 		}
 	}
 	spinlock_release(&ptdev_lock);
@@ -102,7 +102,7 @@ uint32_t pci_vdev_read_bar(const struct pci_vdev *vdev, uint32_t idx)
 	bar = pci_vdev_read_cfg_u32(vdev, offset);
 	/* Sizing BAR */
 	if (bar == ~0U) {
-		bar = vdev->vbars[idx].mask;
+		bar = vdev->vbars[idx].mask | vdev->vbars[idx].fixed;
 	}
 	return bar;
 }
@@ -39,8 +39,9 @@
 static void vpci_init_vdevs(struct acrn_vm *vm);
 static void deinit_prelaunched_vm_vpci(struct acrn_vm *vm);
 static void deinit_postlaunched_vm_vpci(struct acrn_vm *vm);
-static void read_cfg(struct acrn_vpci *vpci, union pci_bdf bdf, uint32_t offset, uint32_t bytes, uint32_t *val);
-static void write_cfg(struct acrn_vpci *vpci, union pci_bdf bdf, uint32_t offset, uint32_t bytes, uint32_t val);
+static int32_t read_cfg(struct acrn_vpci *vpci, union pci_bdf bdf, uint32_t offset, uint32_t bytes, uint32_t *val);
+static int32_t write_cfg(struct acrn_vpci *vpci, union pci_bdf bdf, uint32_t offset, uint32_t bytes, uint32_t val);
+static struct pci_vdev *find_vdev(struct acrn_vpci *vpci, union pci_bdf bdf);
 
 /**
  * @pre vcpu != NULL
@@ -65,18 +66,33 @@ static bool pci_cfgaddr_io_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t by
 /**
  * @pre vcpu != NULL
  * @pre vcpu->vm != NULL
+ *
+ * @retval true on success.
+ * @retval false. (ACRN will deliver this IO request to DM to handle for post-launched VM)
  */
 static bool pci_cfgaddr_io_write(struct acrn_vcpu *vcpu, uint16_t addr, size_t bytes, uint32_t val)
 {
+	bool ret = true;
 	struct acrn_vpci *vpci = &vcpu->vm->vpci;
 	union pci_cfg_addr_reg *cfg_addr = &vpci->addr;
+	union pci_bdf vbdf;
 
 	if ((addr == (uint16_t)PCI_CONFIG_ADDR) && (bytes == 4U)) {
 		/* unmask reserved fields: BITs 24-30 and BITs 0-1 */
 		cfg_addr->value = val & (~0x7f000003U);
+
+		if (is_postlaunched_vm(vcpu->vm)) {
+			vbdf.value = cfg_addr->bits.bdf;
+			/* For post-launched VM, ACRN will only handle PT device, all virtual PCI device
+			 * still need to deliver to ACRN DM to handle.
+			 */
+			if (find_vdev(vpci, vbdf) == NULL) {
+				ret = false;
+			}
+		}
 	}
 
-	return true;
+	return ret;
 }
 
 static inline bool vpci_is_valid_access_offset(uint32_t offset, uint32_t bytes)
@@ -100,40 +116,31 @@ static inline bool vpci_is_valid_access(uint32_t offset, uint32_t bytes)
  * @pre vcpu->vm->vm_id < CONFIG_MAX_VM_NUM
- * @pre (get_vm_config(vcpu->vm->vm_id)->load_order == PRE_LAUNCHED_VM)
- *	|| (get_vm_config(vcpu->vm->vm_id)->load_order == SOS_VM)
+ *
+ * @retval true on success.
+ * @retval false. (ACRN will deliver this IO request to DM to handle for post-launched VM)
  */
 static bool pci_cfgdata_io_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t bytes)
 {
+	int32_t ret = 0;
 	struct acrn_vm *vm = vcpu->vm;
 	struct acrn_vpci *vpci = &vm->vpci;
 	union pci_cfg_addr_reg cfg_addr;
 	union pci_bdf bdf;
 	uint16_t offset = addr - PCI_CONFIG_DATA;
 	uint32_t val = ~0U;
-	struct acrn_vm_config *vm_config;
 	struct pio_request *pio_req = &vcpu->req.reqs.pio;
 
 	cfg_addr.value = atomic_readandclear32(&vpci->addr.value);
 	if (cfg_addr.bits.enable != 0U) {
 		if (vpci_is_valid_access(cfg_addr.bits.reg_num + offset, bytes)) {
-			vm_config = get_vm_config(vm->vm_id);
-
-			switch (vm_config->load_order) {
-			case PRE_LAUNCHED_VM:
-			case SOS_VM:
-				bdf.value = cfg_addr.bits.bdf;
-				read_cfg(vpci, bdf, cfg_addr.bits.reg_num + offset, bytes, &val);
-				break;
-
-			default:
-				ASSERT(false, "Error, pci_cfgdata_io_read should only be called for PRE_LAUNCHED_VM and SOS_VM");
-				break;
-			}
+			bdf.value = cfg_addr.bits.bdf;
+			ret = read_cfg(vpci, bdf, cfg_addr.bits.reg_num + offset, bytes, &val);
 		}
 	}
 
 	pio_req->value = val;
 
-	return true;
+	return (ret == 0);
 }
 
 /**
@@ -142,43 +149,39 @@ static bool pci_cfgdata_io_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t by
  * @pre vcpu->vm->vm_id < CONFIG_MAX_VM_NUM
- * @pre (get_vm_config(vcpu->vm->vm_id)->load_order == PRE_LAUNCHED_VM)
- *	|| (get_vm_config(vcpu->vm->vm_id)->load_order == SOS_VM)
+ *
+ * @retval true on success.
+ * @retval false. (ACRN will deliver this IO request to DM to handle for post-launched VM)
  */
 static bool pci_cfgdata_io_write(struct acrn_vcpu *vcpu, uint16_t addr, size_t bytes, uint32_t val)
 {
+	int32_t ret = 0;
 	struct acrn_vm *vm = vcpu->vm;
 	struct acrn_vpci *vpci = &vm->vpci;
 	union pci_cfg_addr_reg cfg_addr;
 	union pci_bdf bdf;
 	uint16_t offset = addr - PCI_CONFIG_DATA;
-	struct acrn_vm_config *vm_config;
 
 	cfg_addr.value = atomic_readandclear32(&vpci->addr.value);
 	if (cfg_addr.bits.enable != 0U) {
 		if (vpci_is_valid_access(cfg_addr.bits.reg_num + offset, bytes)) {
-			vm_config = get_vm_config(vm->vm_id);
-
-			switch (vm_config->load_order) {
-			case PRE_LAUNCHED_VM:
-			case SOS_VM:
-				bdf.value = cfg_addr.bits.bdf;
-				write_cfg(vpci, bdf, cfg_addr.bits.reg_num + offset, bytes, val);
-				break;
-
-			default:
-				ASSERT(false, "Error, pci_cfgdata_io_write should only be called for PRE_LAUNCHED_VM and SOS_VM");
-				break;
-			}
+			bdf.value = cfg_addr.bits.bdf;
+			ret = write_cfg(vpci, bdf, cfg_addr.bits.reg_num + offset, bytes, val);
 		}
 	}
 
-	return true;
+	return (ret == 0);
 }
 
 /**
  * @pre io_req != NULL && private_data != NULL
+ *
+ * @retval 0 on success.
+ * @retval other on false. (ACRN will deliver this MMIO request to DM to handle for post-launched VM)
  */
 static int32_t vpci_handle_mmconfig_access(struct io_request *io_req, void *private_data)
 {
+	int32_t ret = 0;
 	struct mmio_request *mmio = &io_req->reqs.mmio;
 	struct acrn_vpci *vpci = (struct acrn_vpci *)private_data;
 	uint64_t pci_mmcofg_base = vpci->pci_mmcfg_base;
@@ -199,21 +202,21 @@ static int32_t vpci_handle_mmconfig_access(struct io_request *io_req, void *priv
 
 	if (mmio->direction == REQUEST_READ) {
 		if (!is_plat_hidden_pdev(bdf)) {
-			read_cfg(vpci, bdf, reg_num, (uint32_t)mmio->size, (uint32_t *)&mmio->value);
+			ret = read_cfg(vpci, bdf, reg_num, (uint32_t)mmio->size, (uint32_t *)&mmio->value);
 		} else {
 			/* expose and pass through platform hidden devices to SOS */
 			mmio->value = (uint64_t)pci_pdev_read_cfg(bdf, reg_num, (uint32_t)mmio->size);
 		}
 	} else {
 		if (!is_plat_hidden_pdev(bdf)) {
-			write_cfg(vpci, bdf, reg_num, (uint32_t)mmio->size, (uint32_t)mmio->value);
+			ret = write_cfg(vpci, bdf, reg_num, (uint32_t)mmio->size, (uint32_t)mmio->value);
 		} else {
 			/* expose and pass through platform hidden devices to SOS */
 			pci_pdev_write_cfg(bdf, reg_num, (uint32_t)mmio->size, (uint32_t)mmio->value);
 		}
 	}
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -241,32 +244,22 @@ void vpci_init(struct acrn_vm *vm)
 	vpci_init_vdevs(vm);
 
 	vm_config = get_vm_config(vm->vm_id);
-	switch (vm_config->load_order) {
-	case SOS_VM:
-		pci_mmcfg_base = get_mmcfg_base();
+	if (vm_config->load_order != PRE_LAUNCHED_VM) {
+		/* PCI MMCONFIG for post-launched VM is fixed to 0xE0000000 */
+		pci_mmcfg_base = (vm_config->load_order == SOS_VM) ? get_mmcfg_base() : 0xE0000000UL;
 		vm->vpci.pci_mmcfg_base = pci_mmcfg_base;
 		register_mmio_emulation_handler(vm, vpci_handle_mmconfig_access,
 			pci_mmcfg_base, pci_mmcfg_base + PCI_MMCONFIG_SIZE, &vm->vpci);
-		/* falls through */
-	case PRE_LAUNCHED_VM:
-		/*
-		 * SOS: intercept port CF8 only.
-		 * UOS or pre-launched VM: register handler for CF8 only and I/O requests to CF9/CFA/CFB are
-		 * not handled by vpci.
-		 */
-		register_pio_emulation_handler(vm, PCI_CFGADDR_PIO_IDX, &pci_cfgaddr_range,
-			pci_cfgaddr_io_read, pci_cfgaddr_io_write);
-
-		/* Intercept and handle I/O ports CFC -- CFF */
-		register_pio_emulation_handler(vm, PCI_CFGDATA_PIO_IDX, &pci_cfgdata_range,
-			pci_cfgdata_io_read, pci_cfgdata_io_write);
-		break;
-
-	default:
-		/* Nothing to do for other vm types */
-		break;
 	}
 
+	/* Intercept and handle I/O ports CF8h */
+	register_pio_emulation_handler(vm, PCI_CFGADDR_PIO_IDX, &pci_cfgaddr_range,
+		pci_cfgaddr_io_read, pci_cfgaddr_io_write);
+
+	/* Intercept and handle I/O ports CFCh -- CFFh */
+	register_pio_emulation_handler(vm, PCI_CFGDATA_PIO_IDX, &pci_cfgdata_range,
+		pci_cfgdata_io_read, pci_cfgdata_io_write);
+
 	spinlock_init(&vm->vpci.lock);
 }
@@ -390,8 +383,13 @@ static int32_t vpci_write_pt_dev_cfg(struct pci_vdev *vdev, uint32_t offset,
 	} else if (offset == PCIR_COMMAND) {
 		vdev_pt_write_command(vdev, (bytes > 2U) ? 2U : bytes, (uint16_t)val);
 	} else {
-		/* passthru to physical device */
-		pci_pdev_write_cfg(vdev->pdev->bdf, offset, bytes, val);
+		if (is_postlaunched_vm(vdev->vpci->vm) &&
+			in_range(offset, PCIR_INTERRUPT_LINE, 4U)) {
+			pci_vdev_write_cfg(vdev, offset, bytes, val);
+		} else {
+			/* passthru to physical device */
+			pci_pdev_write_cfg(vdev->pdev->bdf, offset, bytes, val);
+		}
 	}
 
 	return 0;
@@ -412,8 +410,13 @@ static int32_t vpci_read_pt_dev_cfg(const struct pci_vdev *vdev, uint32_t offset
 	} else if (msixcap_access(vdev, offset)) {
 		vmsix_read_cfg(vdev, offset, bytes, val);
 	} else {
-		/* passthru to physical device */
-		*val = pci_pdev_read_cfg(vdev->pdev->bdf, offset, bytes);
+		if (is_postlaunched_vm(vdev->vpci->vm) &&
+			in_range(offset, PCIR_INTERRUPT_LINE, 4U)) {
+			*val = pci_vdev_read_cfg(vdev, offset, bytes);
+		} else {
+			/* passthru to physical device */
+			*val = pci_pdev_read_cfg(vdev->pdev->bdf, offset, bytes);
+		}
 	}
 
 	return 0;
@@ -429,25 +432,32 @@ static const struct pci_vdev_ops pci_pt_dev_ops = {
 /**
  * @pre vpci != NULL
  */
-static void read_cfg(struct acrn_vpci *vpci, union pci_bdf bdf,
+static int32_t read_cfg(struct acrn_vpci *vpci, union pci_bdf bdf,
 	uint32_t offset, uint32_t bytes, uint32_t *val)
 {
+	int32_t ret = 0;
 	struct pci_vdev *vdev;
 
 	spinlock_obtain(&vpci->lock);
 	vdev = find_vdev(vpci, bdf);
 	if (vdev != NULL) {
 		vdev->vdev_ops->read_vdev_cfg(vdev, offset, bytes, val);
+	} else {
+		if (is_postlaunched_vm(vpci->vm)) {
+			ret = -ENODEV;
+		}
 	}
 	spinlock_release(&vpci->lock);
+	return ret;
 }
 
 /**
  * @pre vpci != NULL
 */
-static void write_cfg(struct acrn_vpci *vpci, union pci_bdf bdf,
+static int32_t write_cfg(struct acrn_vpci *vpci, union pci_bdf bdf,
 	uint32_t offset, uint32_t bytes, uint32_t val)
 {
+	int32_t ret = 0;
 	struct pci_vdev *vdev;
 
 	spinlock_obtain(&vpci->lock);
@@ -455,10 +465,15 @@ static void write_cfg(struct acrn_vpci *vpci, union pci_bdf bdf,
 	if (vdev != NULL) {
 		vdev->vdev_ops->write_vdev_cfg(vdev, offset, bytes, val);
 	} else {
-		pr_acrnlog("%s %x:%x.%x not found! off: 0x%x, val: 0x%x\n", __func__,
+		if (!is_postlaunched_vm(vpci->vm)) {
+			pr_acrnlog("%s %x:%x.%x not found! off: 0x%x, val: 0x%x\n", __func__,
 				bdf.bits.b, bdf.bits.d, bdf.bits.f, offset, val);
+		} else {
+			ret = -ENODEV;
+		}
 	}
 	spinlock_release(&vpci->lock);
+	return ret;
 }
 
 /**
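Taken together, the HV-side hunks implement one dispatch rule for post-launched VMs: a config-space access is handled in the hypervisor only if it targets a hypervisor-owned vdev (a passthrough device); otherwise the handler reports failure so the I/O request falls through to the DM. A condensed sketch of that rule, using names from the hunks above (the helper itself is illustrative, not a function in the tree):

```c
/* Illustrative only: condenses the decision made in pci_cfgaddr_io_write(),
 * pci_cfgdata_io_read()/write() and read_cfg()/write_cfg() above.  For a
 * pre-launched VM or the SOS every access is handled in the HV; for a
 * post-launched VM only BDFs that map to a vdev (i.e. passthrough devices)
 * are handled, and everything else is left to the ACRN DM.
 */
static bool cfg_access_handled_in_hv(struct acrn_vpci *vpci, union pci_bdf bdf)
{
	return (!is_postlaunched_vm(vpci->vm)) || (find_vdev(vpci, bdf) != NULL);
}
```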