Mirror of https://github.com/projectacrn/acrn-hypervisor.git
dm: vPCI: remove passthrough PCI device unused code
Now that passthrough PCI device handling has been split from the DM into the HV, we can remove all of the now-unused passthrough PCI device code from the DM.

Tracked-On: #4371
Signed-off-by: Li Fei1 <fei1.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
parent dafa3da693
commit 9fa6eff3c5
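For context, the DM-side assignment path that survives this cleanup is vm_assign_pcidev(), which hands the whole device description to the hypervisor in one call instead of the old vm_assign_ptdev() BDF-only ioctl plus separate MSI/MSI-X bookkeeping in the DM. The fragment below is a minimal sketch of that call as it appears in passthru_init() further down in this diff: only the virt_bdf/phys_bdf fields visible here are filled in, BAR programming and error handling are omitted, and the header names and the helper name assign_passthru_dev are assumptions made for illustration.

#include <string.h>
#include "vmmapi.h"     /* assumed: vm_assign_pcidev(), struct acrn_assign_pcidev */
#include "pci_core.h"   /* assumed: struct pci_vdev, PCI_BDF() */

/* Illustrative only: hand a passthrough device to the hypervisor in one call. */
static int assign_passthru_dev(struct vmctx *ctx, struct pci_vdev *dev, uint16_t phys_bdf)
{
    struct acrn_assign_pcidev pcidev;

    memset(&pcidev, 0, sizeof(pcidev));
    pcidev.virt_bdf = PCI_BDF(dev->bus, dev->slot, dev->func); /* BDF seen by the guest */
    pcidev.phys_bdf = phys_bdf;                                /* BDF on the host */

    /* Per the commit message, the hypervisor now owns MSI/MSI-X handling for the
     * device, which is why the DM-side emulation removed below is no longer needed. */
    return vm_assign_pcidev(ctx, &pcidev);
}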
@@ -457,28 +457,6 @@ vm_set_gsi_irq(struct vmctx *ctx, int gsi, uint32_t operation)
    return ioctl(ctx->fd, IC_SET_IRQLINE, *req);
}

int
vm_assign_ptdev(struct vmctx *ctx, int bus, int slot, int func)
{
    uint16_t bdf;

    bdf = ((bus & 0xff) << 8) | ((slot & 0x1f) << 3) |
        (func & 0x7);

    return ioctl(ctx->fd, IC_ASSIGN_PTDEV, &bdf);
}

int
vm_unassign_ptdev(struct vmctx *ctx, int bus, int slot, int func)
{
    uint16_t bdf;

    bdf = ((bus & 0xff) << 8) | ((slot & 0x1f) << 3) |
        (func & 0x7);

    return ioctl(ctx->fd, IC_DEASSIGN_PTDEV, &bdf);
}

int
vm_assign_pcidev(struct vmctx *ctx, struct acrn_assign_pcidev *pcidev)
{
@@ -523,30 +501,6 @@ vm_unmap_ptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
    return ioctl(ctx->fd, IC_UNSET_MEMSEG, &memmap);
}

int
vm_set_ptdev_msix_info(struct vmctx *ctx, struct ic_ptdev_irq *ptirq)
{
    if (!ptirq)
        return -1;

    return ioctl(ctx->fd, IC_SET_PTDEV_INTR_INFO, ptirq);
}

int
vm_reset_ptdev_msix_info(struct vmctx *ctx, uint16_t virt_bdf, uint16_t phys_bdf,
    int vector_count)
{
    struct ic_ptdev_irq ptirq;

    bzero(&ptirq, sizeof(ptirq));
    ptirq.type = IRQ_MSIX;
    ptirq.virt_bdf = virt_bdf;
    ptirq.phys_bdf = phys_bdf;
    ptirq.msix.vector_cnt = vector_count;

    return ioctl(ctx->fd, IC_RESET_PTDEV_INTR_INFO, &ptirq);
}

int
vm_set_ptdev_intx_info(struct vmctx *ctx, uint16_t virt_bdf, uint16_t phys_bdf,
    int virt_pin, int phys_pin, bool pic_pin)
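The two removed helpers above both pack a bus/device/function triple into the standard 16-bit BDF layout (bus in bits 15:8, device in bits 7:3, function in bits 2:0). A quick worked example, using the integrated GPU at 00:02.0 that appears later in this diff as PCI_BDF_GPU; the helper name bdf_pack is only for illustration:

#include <stdint.h>

/* Same packing as the removed vm_assign_ptdev()/vm_unassign_ptdev() helpers. */
static inline uint16_t bdf_pack(int bus, int slot, int func)
{
    return ((bus & 0xff) << 8) | ((slot & 0x1f) << 3) | (func & 0x7);
}

/* bdf_pack(0, 2, 0) == (0 << 8) | (2 << 3) | 0 == 0x0010,
 * i.e. the PCI_BDF_GPU value (00:02.0) defined further down in this diff. */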
@@ -218,15 +218,6 @@ CFGREAD(struct pci_vdev *dev, int coff, int bytes)
    return pci_get_cfgdata32(dev, coff);
}

static inline int
is_pci_gvt(struct pci_vdev *dev)
{
    if (dev == NULL || strncmp(dev->dev_ops->class_name, "pci-gvt",7))
        return 0;
    else
        return 1;
}

static inline int
is_pt_pci(struct pci_vdev *dev)
{
@@ -46,19 +46,6 @@
#include "pci_core.h"
#include "acpi.h"

#ifndef PCI_COMMAND_INTX_DISABLE
#define PCI_COMMAND_INTX_DISABLE ((uint16_t)0x400)
#endif

/* Used to temporarily set mmc & mme to support only one vector for MSI,
 * remove it when multiple vectors for MSI is ready.
 */
#define FORCE_MSI_SINGLE_VECTOR 1

#define MSIX_TABLE_COUNT(ctrl) (((ctrl) & PCIM_MSIXCTRL_TABLE_SIZE) + 1)
#define MSIX_CAPLEN 12

#define PCI_BDF_GPU 0x00000010 /* 00:02.0 */

/* Some audio drivers get topology data from ACPI NHLT table.
 * For such drivers, we need to copy the host NHLT table to make it
@@ -87,29 +74,14 @@ extern uint64_t audio_nhlt_len;
static int pciaccess_ref_cnt;
static pthread_mutex_t ref_cnt_mtx = PTHREAD_MUTEX_INITIALIZER;

struct mmio_map {
    uint64_t gpa;
    uint64_t hpa;
    size_t size;
};

uint32_t gsm_start_hpa = 0;
uint32_t opregion_start_hpa = 0;

struct passthru_dev {
    struct pci_vdev *dev;
    struct pcibar bar[PCI_BARMAX + 1];
    struct {
        int capoff;
        int msgctrl;
        int emulated;
    } msi;
    struct {
        int capoff;
        int table_size;     /* page aligned size */
        void *table_pages;
        int table_offset;   /* page aligned */
        bool ptirq_allocated;
    } msix;
    bool pcie_cap;
    struct pcisel sel;
@@ -120,34 +92,8 @@ struct passthru_dev {
     * need_reset - reset dev before passthrough
     */
    bool need_reset;
    /* The memory pages, which not overlap with MSI-X table will be
     * passed-through to guest, two potential ranges, before and after MSI-X
     * Table if any.
     */
    struct mmio_map msix_bar_mmio[2];
};

static int
msi_caplen(int msgctrl)
{
    int len;

    len = 10;       /* minimum length of msi capability */

    if (msgctrl & PCIM_MSICTRL_64BIT)
        len += 4;

    /*
     * Ignore the 'mask' and 'pending' bits in the MSI capability
     * (msgctrl & PCIM_MSICTRL_VECTOR).
     * Ignore 10 bytes in total (2-byte reserved, 4-byte mask bits,
     * 4-byte pending bits).
     * We'll let the guest manipulate them directly.
     */

    return len;
}

static uint32_t
read_config(struct pci_device *phys_dev, long reg, int width)
{
@@ -171,47 +117,10 @@ read_config(struct pci_device *phys_dev, long reg, int width)
    return temp;
}

static int
write_config(struct pci_device *phys_dev, long reg, int width, uint32_t data)
{
    int temp = -1;

    switch (width) {
    case 1:
        temp = pci_device_cfg_write_u8(phys_dev, data, reg);
        break;
    case 2:
        temp = pci_device_cfg_write_u16(phys_dev, data, reg);
        break;
    case 4:
        temp = pci_device_cfg_write_u32(phys_dev, data, reg);
        break;
    default:
        warnx("%s: invalid reg width", __func__);
    }

    return temp;
}


#ifdef FORCE_MSI_SINGLE_VECTOR
/* Temporarily set mmc & mme to 0.
 * Remove it when multiple vectors for MSI ready.
 */
static inline void
clear_mmc_mme(uint32_t *val)
{
    *val &= ~((uint32_t)PCIM_MSICTRL_MMC_MASK << 16);
    *val &= ~((uint32_t)PCIM_MSICTRL_MME_MASK << 16);
}
#endif

static int
cfginit_cap(struct vmctx *ctx, struct passthru_dev *ptdev)
{
    int ptr, capptr, cap, sts, caplen;
    uint32_t u32;
    struct pci_vdev *dev = ptdev->dev;
    int ptr, cap, sts;
    struct pci_device *phys_dev = ptdev->phys_dev;

    /*
@@ -224,56 +133,9 @@ cfginit_cap(struct vmctx *ctx, struct passthru_dev *ptdev)
    while (ptr != 0 && ptr != 0xff) {
        cap = read_config(phys_dev, ptr + PCICAP_ID, 1);
        if (cap == PCIY_MSI) {
            /*
             * Copy the MSI capability into the config
             * space of the emulated pci device
             */
            ptdev->msi.capoff = ptr;
            ptdev->msi.msgctrl = read_config(phys_dev,
                ptr + 2, 2);

#ifdef FORCE_MSI_SINGLE_VECTOR
            /* Temporarily set mmc & mme to 0,
             * which means supporting 1 vector. So that
             * guest will not enable more than 1 vector.
             * Remove it when multiple vectors ready.
             */
            ptdev->msi.msgctrl &= ~PCIM_MSICTRL_MMC_MASK;
            ptdev->msi.msgctrl &= ~PCIM_MSICTRL_MME_MASK;
#endif

            ptdev->msi.emulated = 0;
            caplen = msi_caplen(ptdev->msi.msgctrl);
            capptr = ptr;
            while (caplen > 0) {
                u32 = read_config(phys_dev, capptr, 4);

#ifdef FORCE_MSI_SINGLE_VECTOR
                /* Temporarily set mmc & mme to 0.
                 * which means supporting 1 vector.
                 * Remove it when multiple vectors ready
                 */
                if (capptr == ptdev->msi.capoff)
                    clear_mmc_mme(&u32);
#endif

                pci_set_cfgdata32(dev, capptr, u32);
                caplen -= 4;
                capptr += 4;
            }
        } else if (cap == PCIY_MSIX) {
            /*
             * Copy the MSI-X capability
             */
            ptdev->msix.capoff = ptr;
            caplen = 12;
            capptr = ptr;
            while (caplen > 0) {
                u32 = read_config(phys_dev, capptr, 4);
                pci_set_cfgdata32(dev, capptr, u32);
                caplen -= 4;
                capptr += 4;
            }
        } else if (cap == PCIY_EXPRESS)
            ptdev->pcie_cap = true;

@@ -284,69 +146,6 @@ cfginit_cap(struct vmctx *ctx, struct passthru_dev *ptdev)
    return 0;
}

static uint64_t
msix_table_read(struct passthru_dev *ptdev, uint64_t offset, int size)
{
    uint8_t *src8;
    uint16_t *src16;
    uint32_t *src32;
    uint64_t *src64;
    uint64_t data;

    switch (size) {
    case 1:
        src8 = (uint8_t *)(ptdev->msix.table_pages + offset - ptdev->msix.table_offset);
        data = *src8;
        break;
    case 2:
        src16 = (uint16_t *)(ptdev->msix.table_pages + offset - ptdev->msix.table_offset);
        data = *src16;
        break;
    case 4:
        src32 = (uint32_t *)(ptdev->msix.table_pages + offset - ptdev->msix.table_offset);
        data = *src32;
        break;
    case 8:
        src64 = (uint64_t *)(ptdev->msix.table_pages + offset - ptdev->msix.table_offset);
        data = *src64;
        break;
    default:
        return -1;

    }
    return data;
}

static void
msix_table_write(struct passthru_dev *ptdev, uint64_t offset, int size, uint64_t data)
{
    uint8_t *dest8;
    uint16_t *dest16;
    uint32_t *dest32;
    uint64_t *dest64;

    switch (size) {
    case 1:
        dest8 = (uint8_t *)(ptdev->msix.table_pages + offset - ptdev->msix.table_offset);
        *dest8 = data;
        break;
    case 2:
        dest16 = (uint16_t *)(ptdev->msix.table_pages + offset - ptdev->msix.table_offset);
        *dest16 = data;
        break;
    case 4:
        dest32 = (uint32_t *)(ptdev->msix.table_pages + offset - ptdev->msix.table_offset);
        *dest32 = data;
        break;
    case 8:
        dest64 = (uint64_t *)(ptdev->msix.table_pages + offset - ptdev->msix.table_offset);
        *dest64 = data;
        break;
    default:
        break;
    }
}

static inline int ptdev_msix_table_bar(struct passthru_dev *ptdev)
{
    return ptdev->dev->msix.table_bar;
@@ -357,146 +156,6 @@ static inline int ptdev_msix_pba_bar(struct passthru_dev *ptdev)
    return ptdev->dev->msix.pba_bar;
}

static int
init_msix_table(struct vmctx *ctx, struct passthru_dev *ptdev, uint64_t base)
{
    int b, s, f;
    int error, idx;
    size_t len, remaining;
    uint32_t table_size, table_offset;
    vm_paddr_t start;
    struct pci_vdev *dev = ptdev->dev;
    uint16_t virt_bdf = PCI_BDF(dev->bus, dev->slot, dev->func);
    struct ic_ptdev_irq ptirq;

    b = ptdev->sel.bus;
    s = ptdev->sel.dev;
    f = ptdev->sel.func;

    /*
     * If the MSI-X table BAR maps memory intended for
     * other uses, it is at least assured that the table
     * either resides in its own page within the region,
     * or it resides in a page shared with only the PBA.
     */
    table_offset = rounddown2(dev->msix.table_offset, 4096);

    table_size = dev->msix.table_offset - table_offset;
    table_size += dev->msix.table_count * MSIX_TABLE_ENTRY_SIZE;
    table_size = roundup2(table_size, 4096);

    idx = dev->msix.table_bar;
    start = dev->bar[idx].addr;
    remaining = dev->bar[idx].size;

    /* Map everything before the MSI-X table */
    if (table_offset > 0) {
        len = table_offset;
        error = vm_map_ptdev_mmio(ctx, b, s, f, start, len, base);
        if (error) {
            warnx(
                "Failed to map MSI-X BAR passthru pages on %x/%x/%x",
                b,s,f);
            return error;
        }
        /* save mapping info, which need to be unmapped when deinit */
        ptdev->msix_bar_mmio[0].gpa = start;
        ptdev->msix_bar_mmio[0].hpa = base;
        ptdev->msix_bar_mmio[0].size = len;

        base += len;
        start += len;
        remaining -= len;
    }

    /* remap real msix table (page-aligned) to user space */
    error = pci_device_map_range(ptdev->phys_dev, base, table_size,
        PCI_DEV_MAP_FLAG_WRITABLE, &ptdev->msix.table_pages);
    if (error) {
        warnx("Failed to map MSI-X table pages on %x/%x/%x", b,s,f);
        return error;
    }
    ptdev->msix.table_offset = table_offset;
    ptdev->msix.table_size = table_size;

    /* Handle MSI-X vectors:
     * request to alloc vector entries of MSI-X.
     * Set table_paddr/table_size to 0 to skip ioremap in sos kernel.
     */
    ptirq.type = IRQ_MSIX;
    ptirq.virt_bdf = virt_bdf;
    ptirq.phys_bdf = ptdev->phys_bdf;
    ptirq.msix.vector_cnt = dev->msix.table_count;
    ptirq.msix.table_paddr = 0;
    ptirq.msix.table_size = 0;
    error = vm_set_ptdev_msix_info(ctx, &ptirq);
    if (error) {
        warnx("Failed to alloc ptirq entry on %x/%x/%x", b,s,f);
        return error;
    }
    ptdev->msix.ptirq_allocated = true;


    /* Skip the MSI-X table */
    base += table_size;
    start += table_size;
    remaining -= table_size;

    /* Map everything beyond the end of the MSI-X table */
    if (remaining > 0) {
        len = remaining;
        error = vm_map_ptdev_mmio(ctx, b, s, f, start, len, base);
        if (error) {
            warnx(
                "Failed to map MSI-X BAR passthru pages on %x/%x/%x",
                b,s,f);
            return error;
        }
        /* save mapping info, which need to be unmapped when deinit */
        ptdev->msix_bar_mmio[1].gpa = start;
        ptdev->msix_bar_mmio[1].hpa = base;
        ptdev->msix_bar_mmio[1].size = len;
    }

    return 0;
}

static void
deinit_msix_table(struct vmctx *ctx, struct passthru_dev *ptdev)
{
    struct pci_vdev *dev = ptdev->dev;
    uint16_t virt_bdf = PCI_BDF(dev->bus, dev->slot, dev->func);
    int vector_cnt = dev->msix.table_count;

    if (ptdev->msix.ptirq_allocated) {
        pr_info("ptdev reset msix: 0x%x-%x, vector_cnt=%d.\n",
            virt_bdf, ptdev->phys_bdf, vector_cnt);
        vm_reset_ptdev_msix_info(ctx, virt_bdf, ptdev->phys_bdf, vector_cnt);
        ptdev->msix.ptirq_allocated = false;
    }

    if (ptdev->msix.table_pages) {
        pci_device_unmap_range(ptdev->phys_dev, ptdev->msix.table_pages, ptdev->msix.table_size);
        ptdev->msix.table_pages = NULL;
    }

    /* We passthrough the pages not overlap with MSI-X table to guest,
     * need to unmap them when deinit.
     */
    for (int i = 0; i < 2; i++) {
        if(ptdev->msix_bar_mmio[i].size != 0) {
            if (vm_unmap_ptdev_mmio(ctx, ptdev->sel.bus,
                ptdev->sel.dev, ptdev->sel.func,
                ptdev->msix_bar_mmio[i].gpa,
                ptdev->msix_bar_mmio[i].size,
                ptdev->msix_bar_mmio[i].hpa)) {
                warnx("Failed to unmap MSI-X BAR pt pages.");
            }
            ptdev->msix_bar_mmio[i].size = 0;
        }
    }
}

static int
cfginitbar(struct vmctx *ctx, struct passthru_dev *ptdev)
{
@@ -822,20 +481,6 @@ passthru_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
    if (error < 0)
        goto done;

    if (ptdev->phys_bdf == PCI_BDF_GPU) {
        /* get gsm hpa */
        gsm_start_hpa = read_config(ptdev->phys_dev, PCIR_BDSM, 4);
        gsm_start_hpa &= PCIM_BDSM_GSM_MASK;
        /* initialize the EPT mapping for passthrough GPU gsm region */
        vm_map_ptdev_mmio(ctx, 0, 2, 0, GPU_GSM_GPA, GPU_GSM_SIZE, gsm_start_hpa);

        /* get opregion hpa */
        opregion_start_hpa = read_config(ptdev->phys_dev, PCIR_ASLS_CTL, 4);
        opregion_start_hpa &= PCIM_ASLS_OPREGION_MASK;
        /* initialize the EPT mapping for passthrough GPU opregion */
        vm_map_ptdev_mmio(ctx, 0, 2, 0, GPU_OPREGION_GPA, GPU_OPREGION_SIZE, opregion_start_hpa);
    }

    pcidev.virt_bdf = PCI_BDF(dev->bus, dev->slot, dev->func);
    pcidev.phys_bdf = ptdev->phys_bdf;
    for (idx = 0; idx <= PCI_BARMAX; idx++) {
@@ -900,11 +545,6 @@ passthru_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
        vm_reset_ptdev_intx_info(ctx, virt_bdf, ptdev->phys_bdf, dev->lintr.ioapic_irq, false);
    }

    if (ptdev->phys_bdf == PCI_BDF_GPU) {
        vm_unmap_ptdev_mmio(ctx, 0, 2, 0, GPU_GSM_GPA, GPU_GSM_SIZE, gsm_start_hpa);
        vm_unmap_ptdev_mmio(ctx, 0, 2, 0, GPU_OPREGION_GPA, GPU_OPREGION_SIZE, opregion_start_hpa);
    }

    pcidev.virt_bdf = PCI_BDF(dev->bus, dev->slot, dev->func);
    pcidev.phys_bdf = ptdev->phys_bdf;
    pciaccess_cleanup();
@@ -968,82 +608,17 @@ passthru_bind_irq(struct vmctx *ctx, struct pci_vdev *dev)
        dev->lintr.ioapic_irq, ptdev->phys_pin, false);
}

static int
bar_access(int coff)
{
    if (coff >= PCIR_BAR(0) && coff < PCIR_BAR(PCI_BARMAX + 1))
        return 1;
    else
        return 0;
}

static int
passthru_cfgread(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
    int coff, int bytes, uint32_t *rv)
{
    struct passthru_dev *ptdev;

    ptdev = dev->arg;

    /*
     * PCI BARs and MSI capability is emulated.
     */
    if (bar_access(coff))
        return -1;

    /* INTLINE/INTPIN/MINGNT/MAXLAT need to be hacked */
    if (coff >= PCIR_INTLINE && coff <= PCIR_MAXLAT)
        return -1;

    /* Everything else just read from the device's config space */
    *rv = read_config(ptdev->phys_dev, coff, bytes);

    /* passthru_init has initialized the EPT mapping
     * for GPU gsm region.
     * So uos GPU can passthrough physical gsm now.
     * Here, only need return gsm gpa value for uos.
     */
    if ((PCI_BDF(dev->bus, dev->slot, dev->func) == PCI_BDF_GPU)
        && (coff == PCIR_BDSM)) {
        *rv &= ~PCIM_BDSM_GSM_MASK;
        *rv |= GPU_GSM_GPA;
    }

    /* passthru_init has initialized the EPT mapping
     * for GPU opregion.
     * So uos GPU can passthrough physical opregion now.
     * Here, only need return opregion gpa value for uos.
     */
    if ((PCI_BDF(dev->bus, dev->slot, dev->func) == PCI_BDF_GPU)
        && (coff == PCIR_ASLS_CTL)) {
        /* reserve opregion start addr offset to 4KB page */
        *rv &= ~PCIM_ASLS_OPREGION_MASK;
        *rv |= GPU_OPREGION_GPA;
    }

    return 0;
    return ~0;
}

static int
passthru_cfgwrite(struct vmctx *ctx, int vcpu, struct pci_vdev *dev,
    int coff, int bytes, uint32_t val)
{
    struct passthru_dev *ptdev;

    ptdev = dev->arg;

    /*
     * PCI BARs are emulated
     */
    if (bar_access(coff))
        return -1;

    /* INTLINE/INTPIN/MINGNT/MAXLAT need to be hacked */
    if (coff >= PCIR_INTLINE && coff <= PCIR_MAXLAT)
        return -1;

    write_config(ptdev->phys_dev, coff, bytes, val);

    return 0;
}

@@ -1051,36 +626,13 @@ static void
passthru_write(struct vmctx *ctx, int vcpu, struct pci_vdev *dev, int baridx,
    uint64_t offset, int size, uint64_t value)
{
    struct passthru_dev *ptdev;

    ptdev = dev->arg;

    if (baridx == ptdev_msix_table_bar(ptdev)) {
        msix_table_write(ptdev, offset, size, value);
    } else {
        /* TODO: Add support for IO BAR of PTDev */
        warnx("Passthru: PIO write not supported, ignored\n");
    }
}

static uint64_t
passthru_read(struct vmctx *ctx, int vcpu, struct pci_vdev *dev, int baridx,
    uint64_t offset, int size)
{
    struct passthru_dev *ptdev;
    uint64_t val;

    ptdev = dev->arg;

    if (baridx == ptdev_msix_table_bar(ptdev)) {
        val = msix_table_read(ptdev, offset, size);
    } else {
        /* TODO: Add support for IO BAR of PTDev */
        warnx("Passthru: PIO read not supported\n");
        val = (uint64_t)(-1);
    }

    return val;
    return ~0UL;
}

static void
@@ -194,30 +194,16 @@ struct ic_ptdev_irq {
    uint16_t virt_bdf;  /* IN: Device virtual BDF# */
    /** physical bdf description of pass thru device */
    uint16_t phys_bdf;  /* IN: Device physical BDF# */
    union {
        /** info of IOAPIC/PIC interrupt */
        struct {
            /** virtual IOAPIC pin */
            uint32_t virt_pin;
            /** physical IOAPIC pin */
            uint32_t phys_pin;
            /** PIC pin */
            uint32_t is_pic_pin;
        } intx;

        /** info of MSI/MSIX interrupt */
        struct {
            /* Keep this filed on top of msix */
            /** vector count of MSI/MSIX */
            uint32_t vector_cnt;

            /** size of MSIX table(round up to 4K) */
            uint32_t table_size;

            /** physical address of MSIX table */
            uint64_t table_paddr;
        } msix;
    };
    /** info of IOAPIC/PIC interrupt */
    struct {
        /** virtual IOAPIC pin */
        uint32_t virt_pin;
        /** physical IOAPIC pin */
        uint32_t phys_pin;
        /** PIC pin */
        uint32_t is_pic_pin;
    } intx;
};

/**
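After this change, struct ic_ptdev_irq carries only INTx routing information; the msix member of the old union is gone because the hypervisor manages MSI/MSI-X vectors directly. Below is a hedged sketch of how the remaining fields could be filled for the passthrough interrupt ioctl. Only the struct fields and the IC_SET_PTDEV_INTR_INFO name are taken from this diff; the IRQ_INTX constant, the reuse of that ioctl for INTx, and the required ACRN ioctl header are assumptions, and the real DM goes through vm_set_ptdev_intx_info() instead of calling ioctl() itself.

#include <stdbool.h>
#include <string.h>
#include <sys/ioctl.h>
/* assumed: the ACRN DM ioctl header providing struct ic_ptdev_irq and IC_* codes */

/* Illustrative only: describe an INTx mapping for a passthrough device. */
static int set_intx_example(int vm_fd, uint16_t virt_bdf, uint16_t phys_bdf,
    uint32_t virt_pin, uint32_t phys_pin, bool pic_pin)
{
    struct ic_ptdev_irq ptirq;

    memset(&ptirq, 0, sizeof(ptirq));
    ptirq.type = IRQ_INTX;              /* assumed constant, analogous to IRQ_MSIX above */
    ptirq.virt_bdf = virt_bdf;
    ptirq.phys_bdf = phys_bdf;
    ptirq.intx.virt_pin = virt_pin;     /* guest IOAPIC pin */
    ptirq.intx.phys_pin = phys_pin;     /* host IOAPIC pin */
    ptirq.intx.is_pic_pin = pic_pin;    /* route via virtual PIC instead of IOAPIC */

    return ioctl(vm_fd, IC_SET_PTDEV_INTR_INFO, &ptirq);
}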
@@ -123,17 +123,12 @@ int vm_run(struct vmctx *ctx);
int vm_suspend(struct vmctx *ctx, enum vm_suspend_how how);
int vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg);
int vm_set_gsi_irq(struct vmctx *ctx, int gsi, uint32_t operation);
int vm_assign_ptdev(struct vmctx *ctx, int bus, int slot, int func);
int vm_unassign_ptdev(struct vmctx *ctx, int bus, int slot, int func);
int vm_assign_pcidev(struct vmctx *ctx, struct acrn_assign_pcidev *pcidev);
int vm_deassign_pcidev(struct vmctx *ctx, struct acrn_assign_pcidev *pcidev);
int vm_map_ptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
    vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_ptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
    vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_set_ptdev_msix_info(struct vmctx *ctx, struct ic_ptdev_irq *ptirq);
int vm_reset_ptdev_msix_info(struct vmctx *ctx, uint16_t virt_bdf, uint16_t phys_bdf,
    int vector_count);
int vm_set_ptdev_intx_info(struct vmctx *ctx, uint16_t virt_bdf,
    uint16_t phys_bdf, int virt_pin, int phys_pin, bool pic_pin);
int vm_reset_ptdev_intx_info(struct vmctx *ctx, uint16_t virt_bdf,