hv: refine pass-thru device specific vmsix function
- write_vmsix_cap_reg(): emulates MSI-X capability register writes.
  write_pt_vmsix_cap_reg(): emulates MSI-X capability register writes for pass-through (PT) devices.
- rw_vmsix_table(): emulates MSI-X table BAR space access.
- vmsix_handle_table_mmio_access(): emulates the MSI-X table BAR space access only.
- pt_vmsix_handle_table_mmio_access(): emulates the MSI-X table BAR space access and remaps the MSI entry for the PT device when a write is executed.
- Rename 'init_vmsix()' and 'deinit_vmsix()' to 'init_vmsix_pt()' and 'deinit_vmsix_pt()' respectively; they are for PT devices only.
- Remove the two functions below and call 'pci_vdev_read_vcfg()' directly where they were used (see the sketch after this list):
  - 'read_vmsi_cap_reg()'
  - 'read_vmsix_cap_reg()'

Tracked-On: #5407
Signed-off-by: Yonghua Huang <yonghua.huang@intel.com>
Reviewed-by: Li, Fei <fei1.li@intel.com>
Reviewed-by: Wang, Yu1 <yu1.wang@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
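With the two capability-read helpers removed, former call sites read the emulated bytes straight from the virtual config space. A minimal sketch of what such a call site looks like after this change (the wrapper function and its name are illustrative only; the one-line body matches the removed helper):

	/* Illustrative only: where read_vmsix_cap_reg()/read_vmsi_cap_reg() used to be
	 * called, the caller now reads the emulated capability bytes directly. */
	static void read_msix_cap_sketch(const struct pci_vdev *vdev,
			uint32_t offset, uint32_t bytes, uint32_t *val)
	{
		/* was: read_vmsix_cap_reg(vdev, offset, bytes, val); */
		*val = pci_vdev_read_vcfg(vdev, offset, bytes);
	}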
@@ -36,10 +36,76 @@
 #include "vpci_priv.h"
 
 /**
  * @brief Writing MSI-X Capability Structure
  *
  * @pre vdev != NULL
  * @pre vdev->pdev != NULL
  */
-void read_vmsix_cap_reg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val)
+bool write_vmsix_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val)
 {
-	/* For PIO access, we emulate Capability Structures only */
-	*val = pci_vdev_read_vcfg(vdev, offset, bytes);
+	static const uint8_t msix_ro_mask[12U] = {
+		0xffU, 0xffU, 0xffU, 0x3fU,	/* Only Function Mask and MSI-X Enable writable */
+		0xffU, 0xffU, 0xffU, 0xffU,
+		0xffU, 0xffU, 0xffU, 0xffU };
+	bool is_written = false;
+	uint32_t old, ro_mask = ~0U;
+
+	(void)memcpy_s((void *)&ro_mask, bytes, (void *)&msix_ro_mask[offset - vdev->msix.capoff], bytes);
+	if (ro_mask != ~0U) {
+		old = pci_vdev_read_vcfg(vdev, offset, bytes);
+		pci_vdev_write_vcfg(vdev, offset, bytes, (old & ro_mask) | (val & ~ro_mask));
+		is_written = true;
+	}
+
+	return is_written;
 }
+
+/**
+ * @pre vdev != NULL
+ * @pre io_req != NULL
+ * @pre mmio->address >= vdev->msix.mmio_gpa
+ */
+uint32_t rw_vmsix_table(struct pci_vdev *vdev, struct io_request *io_req)
+{
+	struct mmio_request *mmio = &io_req->reqs.mmio;
+	struct msix_table_entry *entry;
+	uint32_t entry_offset, table_offset, index = CONFIG_MAX_MSIX_TABLE_NUM;
+	uint64_t offset;
+
+	/* Must be full DWORD or full QWORD aligned. */
+	if ((mmio->size == 4U) || (mmio->size == 8U)) {
+		offset = mmio->address - vdev->msix.mmio_gpa;
+		if (msixtable_access(vdev, (uint32_t)offset)) {
+
+			table_offset = (uint32_t)(offset - vdev->msix.table_offset);
+			index = table_offset / MSIX_TABLE_ENTRY_SIZE;
+
+			entry = &vdev->msix.table_entries[index];
+			entry_offset = table_offset % MSIX_TABLE_ENTRY_SIZE;
+
+			if (mmio->direction == REQUEST_READ) {
+				(void)memcpy_s(&mmio->value, (size_t)mmio->size,
+					(void *)entry + entry_offset, (size_t)mmio->size);
+			} else {
+				(void)memcpy_s((void *)entry + entry_offset, (size_t)mmio->size,
+					&mmio->value, (size_t)mmio->size);
+			}
+		} else if (mmio->direction == REQUEST_READ) {
+			mmio->value = 0UL;
+		}
+	} else {
+		pr_err("%s, Only DWORD and QWORD are permitted", __func__);
+	}
+
+	return index;
+}
+
+/**
+ * @pre io_req != NULL
+ * @pre priv_data != NULL
+ */
+int32_t vmsix_handle_table_mmio_access(struct io_request *io_req, void *priv_data)
+{
+	(void)rw_vmsix_table((struct pci_vdev *)priv_data, io_req);
+	return 0;
+}