HV: use the cached pci device info for sharing mode

Tracked-On: #2431
Signed-off-by: dongshen <dongsheng.x.zhang@intel.com>
Reviewed-by: Anthony Xu <anthony.xu@intel.com>
This commit is contained in:
dongshen
2019-01-24 16:34:30 -08:00
committed by ACRN System Integration
parent e0f9d14011
commit 983b717a61
5 changed files with 97 additions and 156 deletions

View File

@@ -33,6 +33,7 @@
static inline bool msicap_access(const struct pci_vdev *vdev, uint32_t offset)
{
bool ret;
if (vdev->msi.capoff == 0U) {
ret = 0;
} else {
@@ -83,7 +84,8 @@ static int32_t vmsi_remap(const struct pci_vdev *vdev, bool enable)
/* Update MSI Capability structure to physical device */
pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_ADDR, 0x4U, (uint32_t)info.pmsi_addr.full);
if ((msgctrl & PCIM_MSICTRL_64BIT) != 0U) {
pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_ADDR_HIGH, 0x4U, (uint32_t)(info.pmsi_addr.full >> 32U));
pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_ADDR_HIGH, 0x4U,
(uint32_t)(info.pmsi_addr.full >> 32U));
pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_DATA_64BIT, 0x2U, (uint16_t)info.pmsi_data.full);
} else {
pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_DATA, 0x2U, (uint16_t)info.pmsi_data.full);
@@ -105,7 +107,7 @@ static int32_t vmsi_cfgread(const struct pci_vdev *vdev, uint32_t offset, uint32
/* For PIO access, we emulate Capability Structures only */
if (msicap_access(vdev, offset)) {
*val = pci_vdev_read_cfg(vdev, offset, bytes);
ret = 0;
ret = 0;
} else {
ret = -ENODEV;
}
@@ -166,68 +168,53 @@ static const struct pci_vdev_ops pci_ops_vdev_msi = {
.cfgread = vmsi_cfgread,
};
/* Read a uint32_t from buffer (little endian) */
/*
 * Assemble a uint32_t from four consecutive bytes stored little endian.
 * Byte i of the buffer contributes bits [8*i, 8*i+7] of the result.
 */
static uint32_t buf_read32(const uint8_t buf[])
{
	uint32_t val = 0U;
	uint32_t i;

	for (i = 0U; i < 4U; i++) {
		val |= (uint32_t)buf[i] << (i * 8U);
	}

	return val;
}
/* Write a uint32_t to buffer (little endian) */
/*
 * Store a uint32_t into four consecutive bytes, little endian:
 * byte i receives bits [8*i, 8*i+7] of val.
 */
static void buf_write32(uint8_t buf[], uint32_t val)
{
	uint32_t i;

	for (i = 0U; i < 4U; i++) {
		buf[i] = (uint8_t)((val >> (i * 8U)) & 0xFFU);
	}
}
void populate_msi_struct(struct pci_vdev *vdev)
{
uint8_t ptr, cap;
uint32_t msgctrl;
uint32_t len, bytes, offset, val;
union pci_bdf pbdf = vdev->pdev.bdf;
struct pci_pdev *pdev = &vdev->pdev;
uint32_t val;
/* Has new Capabilities list? */
if ((pci_pdev_read_cfg(pbdf, PCIR_STATUS, 2U) & PCIM_STATUS_CAPPRESENT) != 0U) {
ptr = (uint8_t)pci_pdev_read_cfg(pbdf, PCIR_CAP_PTR, 1U);
while ((ptr != 0U) && (ptr != 0xFFU)) {
cap = (uint8_t)pci_pdev_read_cfg(pbdf, ptr + PCICAP_ID, 1U);
/* Copy MSI/MSI-X capability struct into virtual device */
if (pdev->msi.capoff != 0U) {
vdev->msi.capoff = pdev->msi.capoff;
vdev->msi.caplen = pdev->msi.caplen;
/* Ignore all other Capability IDs for now */
if ((cap == PCIY_MSI) || (cap == PCIY_MSIX)) {
offset = ptr;
if (cap == PCIY_MSI) {
vdev->msi.capoff = offset;
msgctrl = pci_pdev_read_cfg(pbdf, offset + PCIR_MSI_CTRL, 2U);
/* Assign MSI handler for configuration read and write */
add_vdev_handler(vdev, &pci_ops_vdev_msi);
/*
* Ignore the 'mask' and 'pending' bits in the MSI capability
* (msgctrl & PCIM_MSICTRL_VECTOR).
* We'll let the guest manipulate them directly.
*/
len = ((msgctrl & PCIM_MSICTRL_64BIT) != 0U) ? 14U : 10U;
vdev->msi.caplen = len;
(void)memcpy_s((void *)&vdev->cfgdata.data_8[pdev->msi.capoff], pdev->msi.caplen,
(void *)&pdev->msi.cap[0U], pdev->msi.caplen);
/* Assign MSI handler for configuration read and write */
add_vdev_handler(vdev, &pci_ops_vdev_msi);
} else {
vdev->msix.capoff = offset;
vdev->msix.caplen = MSIX_CAPLEN;
len = vdev->msix.caplen;
val = buf_read32(&pdev->msi.cap[0U]);
val &= ~((uint32_t)PCIM_MSICTRL_MMC_MASK << 16U);
val &= ~((uint32_t)PCIM_MSICTRL_MME_MASK << 16U);
/* Assign MSI-X handler for configuration read and write */
add_vdev_handler(vdev, &pci_ops_vdev_msix);
}
buf_write32(&vdev->cfgdata.data_8[pdev->msi.capoff], val);
}
/* Copy MSI/MSI-X capability struct into virtual device */
while (len > 0U) {
bytes = (len >= 4U) ? 4U : len;
val = pci_pdev_read_cfg(pbdf, offset, bytes);
if (pdev->msix.capoff != 0U) {
vdev->msix.capoff = pdev->msix.capoff;
vdev->msix.caplen = pdev->msix.caplen;
if ((cap == PCIY_MSI) && (offset == vdev->msi.capoff)) {
/*
* Don't support multiple vector for now,
* Force Multiple Message Enable and Multiple Message
* Capable to 0
*/
val &= ~((uint32_t)PCIM_MSICTRL_MMC_MASK << 16U);
val &= ~((uint32_t)PCIM_MSICTRL_MME_MASK << 16U);
}
/* Assign MSI-X handler for configuration read and write */
add_vdev_handler(vdev, &pci_ops_vdev_msix);
pci_vdev_write_cfg(vdev, offset, bytes, val);
len -= bytes;
offset += bytes;
}
}
ptr = (uint8_t)pci_pdev_read_cfg(pbdf, ptr + PCICAP_NEXTPTR, 1U);
}
(void)memcpy_s((void *)&vdev->cfgdata.data_8[pdev->msix.capoff], pdev->msix.caplen,
(void *)&pdev->msix.cap[0U], pdev->msix.caplen);
}
}

View File

@@ -155,7 +155,7 @@ static int32_t vmsix_cfgread(const struct pci_vdev *vdev, uint32_t offset, uint3
if (msixcap_access(vdev, offset)) {
*val = pci_vdev_read_cfg(vdev, offset, bytes);
ret = 0;
ret = 0;
} else {
ret = -ENODEV;
}
@@ -189,8 +189,7 @@ static int32_t vmsix_cfgwrite(struct pci_vdev *vdev, uint32_t offset, uint32_t b
pci_pdev_write_cfg(vdev->pdev.bdf, offset, 2U, val);
}
}
ret = 0;
ret = 0;
} else {
ret = -ENODEV;
}
@@ -230,6 +229,7 @@ static void vmsix_table_rw(struct pci_vdev *vdev, struct mmio_request *mmio, uin
*/
if (entry_offset < offsetof(struct msix_table_entry, data)) {
uint64_t qword_mask = ~0UL;
if (mmio->size == 4U) {
qword_mask = (entry_offset == 0U) ?
0x00000000FFFFFFFFUL : 0xFFFFFFFF00000000UL;
@@ -311,67 +311,18 @@ static int32_t vmsix_table_mmio_access_handler(struct io_request *io_req, void *
return ret;
}
/*
 * Resolve the MMIO base and size of the physical BAR that holds the
 * MSI-X table and cache them (hva, gpa, size) into vdev->msix.
 *
 * Uses the standard PCI BAR sizing protocol on the physical device:
 * save the BAR, write all-ones, read back the address mask, then
 * restore the saved value.  Config-space access ordering here is
 * significant — do not reorder the write/read/restore sequence.
 */
static void decode_msix_table_bar(struct pci_vdev *vdev)
{
uint32_t bir = vdev->msix.table_bar;
union pci_bdf pbdf = vdev->pdev.bdf;
uint64_t base, size;
uint32_t bar_lo, bar_hi, val32;
bar_lo = pci_pdev_read_cfg(pbdf, pci_bar_offset(bir), 4U);
/* MSI-X tables must live in memory space; I/O BARs are rejected below */
if ((bar_lo & PCIM_BAR_SPACE) != PCIM_BAR_IO_SPACE) {
/* Get the base address */
base = (uint64_t)bar_lo & PCIM_BAR_MEM_BASE;
if ((bar_lo & PCIM_BAR_MEM_TYPE) == PCIM_BAR_MEM_64) {
/*
 * 64-bit BAR: the next BAR slot carries the upper 32 bits.
 * NOTE(review): bir + 1U is read here without checking
 * bir < PCI_BAR_COUNT - 1U (the sizing step below does check) —
 * presumably callers guarantee a valid 64-bit BAR index; confirm.
 */
bar_hi = pci_pdev_read_cfg(pbdf, pci_bar_offset(bir + 1U), 4U);
base |= ((uint64_t)bar_hi << 32U);
}
vdev->msix.mmio_hva = (uint64_t)hpa2hva(base);
vdev->msix.mmio_gpa = sos_vm_hpa2gpa(base);
/* Sizing the BAR */
size = 0U;
if (((bar_lo & PCIM_BAR_MEM_TYPE) == PCIM_BAR_MEM_64) && (bir < (PCI_BAR_COUNT - 1U))) {
/* Size the upper half first so the low-half mask can be OR-ed in */
pci_pdev_write_cfg(pbdf, pci_bar_offset(bir + 1U), 4U, ~0U);
size = (uint64_t)pci_pdev_read_cfg(pbdf, pci_bar_offset(bir + 1U), 4U);
size <<= 32U;
}
pci_pdev_write_cfg(pbdf, pci_bar_offset(bir), 4U, ~0U);
val32 = pci_pdev_read_cfg(pbdf, pci_bar_offset(bir), 4U);
size |= ((uint64_t)val32 & PCIM_BAR_MEM_BASE);
/* Isolate the lowest set bit of the mask: the power-of-two BAR size */
vdev->msix.mmio_size = size & ~(size - 1U);
/* Restore the BAR */
pci_pdev_write_cfg(pbdf, pci_bar_offset(bir), 4U, bar_lo);
if ((bar_lo & PCIM_BAR_MEM_TYPE) == PCIM_BAR_MEM_64) {
pci_pdev_write_cfg(pbdf, pci_bar_offset(bir + 1U), 4U, bar_hi);
}
} else {
/* I/O bar, should never happen */
pr_err("PCI device (%x) has MSI-X Table at IO BAR", vdev->vbdf.value);
}
}
static int32_t vmsix_init(struct pci_vdev *vdev)
{
uint32_t msgctrl;
uint32_t table_info, i;
uint32_t i;
uint64_t addr_hi, addr_lo;
struct pci_msix *msix = &vdev->msix;
struct pci_pdev *pdev = &vdev->pdev;
struct pci_bar *bar;
int32_t ret;
msgctrl = pci_pdev_read_cfg(vdev->pdev.bdf, vdev->msix.capoff + PCIR_MSIX_CTRL, 2U);
/* Read Table Offset and Table BIR */
table_info = pci_pdev_read_cfg(vdev->pdev.bdf, msix->capoff + PCIR_MSIX_TABLE, 4U);
msix->table_bar = table_info & PCIM_MSIX_BIR_MASK;
msix->table_offset = table_info & ~PCIM_MSIX_BIR_MASK;
msix->table_count = (msgctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1U;
msix->table_bar = pdev->msix.table_bar;
msix->table_offset = pdev->msix.table_offset;
msix->table_count = pdev->msix.table_count;
if (msix->table_bar < (PCI_BAR_COUNT - 1U)) {
/* Mask all table entries */
@@ -381,7 +332,12 @@ static int32_t vmsix_init(struct pci_vdev *vdev)
msix->tables[i].data = 0U;
}
decode_msix_table_bar(vdev);
bar = &pdev->bar[msix->table_bar];
if (bar != NULL) {
vdev->msix.mmio_hva = (uint64_t)hpa2hva(bar->base);
vdev->msix.mmio_gpa = sos_vm_hpa2gpa(bar->base);
vdev->msix.mmio_size = bar->size;
}
if (msix->mmio_gpa != 0U) {
/*
@@ -413,7 +369,7 @@ static int32_t vmsix_init(struct pci_vdev *vdev)
} else {
pr_err("%s, MSI-X device (%x) invalid table BIR %d", __func__, vdev->pdev.bdf.value, msix->table_bar);
vdev->msix.capoff = 0U;
ret = -EIO;
ret = -EIO;
}
return ret;

View File

@@ -102,31 +102,32 @@ static void sharing_mode_cfgwrite(__unused struct acrn_vpci *vpci, union pci_bdf
}
}
static struct pci_vdev *alloc_pci_vdev(const struct acrn_vm *vm, union pci_bdf bdf)
static struct pci_vdev *alloc_pci_vdev(const struct acrn_vm *vm, const struct pci_pdev *pdev_ref)
{
struct pci_vdev *vdev;
struct pci_vdev *vdev = NULL;
if (num_pci_vdev < CONFIG_MAX_PCI_DEV_NUM) {
vdev = &sharing_mode_vdev_array[num_pci_vdev];
num_pci_vdev++;
/* vbdf equals to pbdf otherwise remapped */
vdev->vbdf = bdf;
vdev->vpci = &vm->vpci;
vdev->pdev.bdf = bdf;
} else {
vdev = NULL;
if ((vm != NULL) && (vdev != NULL) && (pdev_ref != NULL)) {
vdev->vpci = &vm->vpci;
/* vbdf equals to pbdf otherwise remapped */
vdev->vbdf = pdev_ref->bdf;
(void)memcpy_s((void *)&vdev->pdev, sizeof(struct pci_pdev),
(const void *)pdev_ref, sizeof(struct pci_pdev));
}
}
return vdev;
}
static void enumerate_pci_dev(uint16_t pbdf, const void *cb_data)
static void init_vdev_for_pdev(const struct pci_pdev *pdev, const void *cb_data)
{
const struct acrn_vm *vm = (const struct acrn_vm *)cb_data;
struct pci_vdev *vdev;
vdev = alloc_pci_vdev(vm, (union pci_bdf)pbdf);
vdev = alloc_pci_vdev(vm, pdev);
if (vdev != NULL) {
populate_msi_struct(vdev);
}
@@ -143,14 +144,10 @@ static int32_t sharing_mode_vpci_init(const struct acrn_vm *vm)
* IO/MMIO requests from non-sos_vm guests will be injected to device model.
*/
if (!is_sos_vm(vm)) {
ret = -ENODEV;
ret = -ENODEV;
} else {
/* Initialize PCI vdev array */
num_pci_vdev = 0U;
(void)memset((void *)sharing_mode_vdev_array, 0U, sizeof(sharing_mode_vdev_array));
/* build up vdev array for sos_vm */
pci_scan_bus(enumerate_pci_dev, vm);
/* Build up vdev array for sos_vm */
pci_pdev_foreach(init_vdev_for_pdev, vm);
for (i = 0U; i < num_pci_vdev; i++) {
vdev = &sharing_mode_vdev_array[i];