hv: refine pass-thru device specific vmsix function
- write_vmsix_cap_reg(): emulates vMSI-X capability writes (generic emulation).
- write_pt_vmsix_cap_reg(): emulates MSI-X capability writes for pass-thru (PT) devices.
- rw_vmsix_table(): emulates vMSI-X table BAR space access.
- vmsix_handle_table_mmio_access(): emulates the vMSI-X BAR space access only.
- pt_vmsix_handle_table_mmio_access(): emulates the vMSI-X BAR space access and, on a write, remaps the MSI entry for the PT device.
- Rename 'init_vmsix()' and 'deinit_vmsix()' to 'init_vmsix_pt()' and 'deinit_vmsix_pt()' respectively; they are for PT devices only.
- Remove the two functions below and call 'pci_vdev_read_vcfg()' directly where they were used:
  - 'read_vmsi_cap_reg()'
  - 'read_vmsix_cap_reg()'

Tracked-On: #5407
Signed-off-by: Yonghua Huang <yonghua.huang@intel.com>
Reviewed-by: Li, Fei <fei1.li@intel.com>
Reviewed-by: Wang, Yu1 <yu1.wang@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
parent 1a252b5f18 - commit cdfc82f03b
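In outline, the refactor layers a thin pass-thru wrapper over generic capability emulation: the generic write now reports whether anything writable changed, and only then does the PT layer touch hardware. A minimal standalone sketch of that layering (the struct fields, MSIX_ENABLE_BIT, and the stub bodies here are illustrative stand-ins, not the hypervisor code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for struct pci_vdev; the real type lives in the vpci headers. */
struct pci_vdev {
	uint32_t msix_msgctrl;	/* virtual MSI-X Message Control (made-up field) */
	bool intx_enabled;	/* state of the physical INTx line (made-up field) */
};

#define MSIX_ENABLE_BIT 0x8000U	/* MSI-X Enable bit in Message Control */

/* Generic layer: emulate the write against virtual config space and
 * report whether a writable field actually changed. */
static bool write_vmsix_cap_reg(struct pci_vdev *vdev, uint32_t val)
{
	bool is_written = (vdev->msix_msgctrl != val);

	vdev->msix_msgctrl = val;
	return is_written;
}

/* PT layer: reuse the generic emulation, then mirror the side effect
 * onto hardware; INTx must be masked once MSI-X is enabled. */
static void write_pt_vmsix_cap_reg(struct pci_vdev *vdev, uint32_t val)
{
	if (write_vmsix_cap_reg(vdev, val) &&
			((vdev->msix_msgctrl & MSIX_ENABLE_BIT) != 0U)) {
		vdev->intx_enabled = false;	/* cf. enable_disable_pci_intx() */
	}
}

int main(void)
{
	struct pci_vdev dev = { 0U, true };

	write_pt_vmsix_cap_reg(&dev, MSIX_ENABLE_BIT);
	printf("msgctrl=0x%x intx=%d\n", (unsigned)dev.msix_msgctrl, (int)dev.intx_enabled);
	return 0;
}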
@@ -53,19 +53,11 @@ static inline struct msix_table_entry *get_msix_table_entry(const struct pci_vdev
  * @pre vdev != NULL
  * @pre vdev->pdev != NULL
  */
-void write_vmsix_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val)
+void write_pt_vmsix_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val)
 {
-	static const uint8_t msix_ro_mask[12U] = {
-			0xffU, 0xffU, 0xffU, 0x3fU, /* Only Function Mask and MSI-X Enable writable */
-			0xffU, 0xffU, 0xffU, 0xffU,
-			0xffU, 0xffU, 0xffU, 0xffU };
-	uint32_t msgctrl, old, ro_mask = ~0U;
+	uint32_t msgctrl;
 
-	(void)memcpy_s((void *)&ro_mask, bytes, (void *)&msix_ro_mask[offset - vdev->msix.capoff], bytes);
-	if (ro_mask != ~0U) {
-		old = pci_vdev_read_vcfg(vdev, offset, bytes);
-		pci_vdev_write_vcfg(vdev, offset, bytes, (old & ro_mask) | (val & ~ro_mask));
-
+	if (write_vmsix_cap_reg(vdev, offset, bytes, val)) {
 		msgctrl = pci_vdev_read_vcfg(vdev, vdev->msix.capoff + PCIR_MSIX_CTRL, 2U);
 		/* If MSI Enable is being set, make sure INTxDIS bit is set */
 		if ((msgctrl & PCIM_MSIXCTRL_MSIX_ENABLE) != 0U) {
@@ -131,105 +123,26 @@ static void remap_one_vmsix_entry(const struct pci_vdev *vdev, uint32_t index)
 
 }
 
-/**
- * @pre vdev != NULL
- * @pre mmio != NULL
- */
-static void rw_vmsix_table(struct pci_vdev *vdev, struct mmio_request *mmio, uint32_t offset)
-{
-	struct msix_table_entry *entry;
-	uint32_t entry_offset, table_offset, index;
-
-	/* Find out which entry it's accessing */
-	table_offset = offset - vdev->msix.table_offset;
-	index = table_offset / MSIX_TABLE_ENTRY_SIZE;
-
-	if (index < vdev->msix.table_count) {
-		entry = &vdev->msix.table_entries[index];
-		entry_offset = table_offset % MSIX_TABLE_ENTRY_SIZE;
-
-		if (mmio->direction == REQUEST_READ) {
-			(void)memcpy_s(&mmio->value, (size_t)mmio->size,
-					(void *)entry + entry_offset, (size_t)mmio->size);
-		} else {
-			/* Only DWORD and QWORD are permitted */
-			if ((mmio->size == 4U) || (mmio->size == 8U)) {
-				/* Write to pci_vdev */
-				(void)memcpy_s((void *)entry + entry_offset, (size_t)mmio->size,
-						&mmio->value, (size_t)mmio->size);
-				if (vdev->msix.is_vmsix_on_msi) {
-					remap_one_vmsix_entry_on_msi(vdev, index);
-				} else {
-					remap_one_vmsix_entry(vdev, index);
-				}
-			} else {
-				pr_err("%s, Only DWORD and QWORD are permitted", __func__);
-			}
-
-		}
-	} else {
-		pr_err("%s, invalid arguments %lx - %lx", __func__, mmio->value, mmio->address);
-	}
-
-}
-
-
 /**
  * @pre io_req != NULL
- * @pre handler_private_data != NULL
+ * @pre priv_data != NULL
  */
-static int32_t vmsix_handle_table_mmio_access(struct io_request *io_req, void *handler_private_data)
+static int32_t pt_vmsix_handle_table_mmio_access(struct io_request *io_req, void *priv_data)
 {
 	struct mmio_request *mmio = &io_req->reqs.mmio;
 	struct pci_vdev *vdev;
+	uint32_t index;
 	int32_t ret = 0;
-	uint64_t offset;
-	void *hva;
 
-	vdev = (struct pci_vdev *)handler_private_data;
-	/* This device has not be assigned to other OS */
+	vdev = (struct pci_vdev *)priv_data;
 	if (vdev->user == vdev) {
-		offset = mmio->address - vdev->msix.mmio_gpa;
+		index = rw_vmsix_table(vdev, io_req);
 
-		if (msixtable_access(vdev, (uint32_t)offset)) {
-			rw_vmsix_table(vdev, mmio, (uint32_t)offset);
-		} else if (vdev->msix.is_vmsix_on_msi) {
-			/* According to PCI spec, PBA is read-only.
-			 * Don't emulate PBA according to the device status, just return 0.
-			 */
-			if (mmio->direction == REQUEST_READ) {
-				mmio->value = 0UL;
+		if ((mmio->direction == REQUEST_WRITE) && (index < vdev->msix.table_count)) {
+			if (vdev->msix.is_vmsix_on_msi) {
+				remap_one_vmsix_entry_on_msi(vdev, index);
 			} else {
-				ret = -EINVAL;
-			}
-		} else {
-			hva = hpa2hva(vdev->msix.mmio_hpa + offset);
-
-			/* Only DWORD and QWORD are permitted */
-			if ((mmio->size == 4U) || (mmio->size == 8U)) {
-				if (hva != NULL) {
-					stac();
-					/* MSI-X PBA and Capability Table could be in the same range */
-					if (mmio->direction == REQUEST_READ) {
-						/* mmio->size is either 4U or 8U */
-						if (mmio->size == 4U) {
-							mmio->value = (uint64_t)mmio_read32((const void *)hva);
-						} else {
-							mmio->value = mmio_read64((const void *)hva);
-						}
-					} else {
-						/* mmio->size is either 4U or 8U */
-						if (mmio->size == 4U) {
-							mmio_write32((uint32_t)(mmio->value), (void *)hva);
-						} else {
-							mmio_write64(mmio->value, (void *)hva);
-						}
-					}
-					clac();
-				}
-			} else {
-				pr_err("%s, Only DWORD and QWORD are permitted", __func__);
-				ret = -EINVAL;
-			}
+				remap_one_vmsix_entry(vdev, index);
+			}
 		}
 	} else {
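The index math that moved into the new rw_vmsix_table() is unchanged: MSI-X table entries are 16 bytes each (fixed by the PCI spec), so a divide picks the entry and a modulo picks the byte offset within it. A standalone illustration of that decoding (the offsets are made up):

#include <stdint.h>
#include <stdio.h>

#define MSIX_TABLE_ENTRY_SIZE 16U	/* entry size fixed by the PCI spec */

int main(void)
{
	/* Hypothetical byte offsets into the MSI-X table region. */
	const uint32_t table_offsets[4] = { 0U, 8U, 16U, 52U };

	for (uint32_t i = 0U; i < 4U; i++) {
		uint32_t off = table_offsets[i];
		uint32_t index = off / MSIX_TABLE_ENTRY_SIZE;		/* which entry */
		uint32_t entry_offset = off % MSIX_TABLE_ENTRY_SIZE;	/* which field */

		printf("offset %2u -> entry %u, byte %2u within entry\n",
				(unsigned)off, (unsigned)index, (unsigned)entry_offset);
	}
	return 0;
}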
@@ -284,7 +197,7 @@ void vdev_pt_map_msix(struct pci_vdev *vdev, bool hold_lock)
 	addr_hi = addr_lo + (msix->table_count * MSIX_TABLE_ENTRY_SIZE);
 	addr_lo = round_page_down(addr_lo);
 	addr_hi = round_page_up(addr_hi);
-	register_mmio_emulation_handler(vm, vmsix_handle_table_mmio_access,
+	register_mmio_emulation_handler(vm, pt_vmsix_handle_table_mmio_access,
 			addr_lo, addr_hi, vdev, hold_lock);
 	ept_del_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, addr_lo, addr_hi - addr_lo);
 	msix->mmio_gpa = vbar->base_gpa;
@@ -537,7 +450,7 @@ static void init_bars(struct pci_vdev *vdev, bool is_sriov_bar)
  * @pre vdev != NULL
  * @pre vdev->pdev != NULL
  */
-void init_vmsix(struct pci_vdev *vdev)
+void init_vmsix_pt(struct pci_vdev *vdev)
 {
 	struct pci_pdev *pdev = vdev->pdev;
 
@@ -557,7 +470,7 @@ void init_vmsix(struct pci_vdev *vdev)
  * @pre vdev != NULL
  * @pre vdev->vpci != NULL
  */
-void deinit_vmsix(struct pci_vdev *vdev)
+void deinit_vmsix_pt(struct pci_vdev *vdev)
 {
 	if (has_msix_cap(vdev)) {
 		if (vdev->msix.table_count != 0U) {
@@ -95,15 +95,6 @@ static void remap_vmsi(const struct pci_vdev *vdev)
 	}
 }
 
-/**
- * @pre vdev != NULL
- */
-void read_vmsi_cap_reg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val)
-{
-	/* For PIO access, we emulate Capability Structures only */
-	*val = pci_vdev_read_vcfg(vdev, offset, bytes);
-}
-
 /**
  * @brief Writing MSI Capability Structure
  *
@@ -36,10 +36,76 @@
 #include "vpci_priv.h"
 
 /**
+ * @brief Writing MSI-X Capability Structure
+ *
  * @pre vdev != NULL
+ * @pre vdev->pdev != NULL
  */
-void read_vmsix_cap_reg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val)
+bool write_vmsix_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val)
 {
-	/* For PIO access, we emulate Capability Structures only */
-	*val = pci_vdev_read_vcfg(vdev, offset, bytes);
+	static const uint8_t msix_ro_mask[12U] = {
+			0xffU, 0xffU, 0xffU, 0x3fU, /* Only Function Mask and MSI-X Enable writable */
+			0xffU, 0xffU, 0xffU, 0xffU,
+			0xffU, 0xffU, 0xffU, 0xffU };
+	bool is_written = false;
+	uint32_t old, ro_mask = ~0U;
+
+	(void)memcpy_s((void *)&ro_mask, bytes, (void *)&msix_ro_mask[offset - vdev->msix.capoff], bytes);
+	if (ro_mask != ~0U) {
+		old = pci_vdev_read_vcfg(vdev, offset, bytes);
+		pci_vdev_write_vcfg(vdev, offset, bytes, (old & ro_mask) | (val & ~ro_mask));
+		is_written = true;
+	}
+
+	return is_written;
+}
+
+/**
+ * @pre vdev != NULL
+ * @pre io_req != NULL
+ * @pre mmio->address >= vdev->msix.mmio_gpa
+ */
+uint32_t rw_vmsix_table(struct pci_vdev *vdev, struct io_request *io_req)
+{
+	struct mmio_request *mmio = &io_req->reqs.mmio;
+	struct msix_table_entry *entry;
+	uint32_t entry_offset, table_offset, index = CONFIG_MAX_MSIX_TABLE_NUM;
+	uint64_t offset;
+
+	/* Must be full DWORD or full QWORD aligned. */
+	if ((mmio->size == 4U) || (mmio->size == 8U)) {
+		offset = mmio->address - vdev->msix.mmio_gpa;
+		if (msixtable_access(vdev, (uint32_t)offset)) {
+
+			table_offset = (uint32_t)(offset - vdev->msix.table_offset);
+			index = table_offset / MSIX_TABLE_ENTRY_SIZE;
+
+			entry = &vdev->msix.table_entries[index];
+			entry_offset = table_offset % MSIX_TABLE_ENTRY_SIZE;
+
+			if (mmio->direction == REQUEST_READ) {
+				(void)memcpy_s(&mmio->value, (size_t)mmio->size,
+						(void *)entry + entry_offset, (size_t)mmio->size);
+			} else {
+				(void)memcpy_s((void *)entry + entry_offset, (size_t)mmio->size,
+						&mmio->value, (size_t)mmio->size);
+			}
+		} else if (mmio->direction == REQUEST_READ) {
+			mmio->value = 0UL;
+		}
+	} else {
+		pr_err("%s, Only DWORD and QWORD are permitted", __func__);
+	}
+
+	return index;
+}
+
+/**
+ * @pre io_req != NULL
+ * @pre priv_data != NULL
+ */
+int32_t vmsix_handle_table_mmio_access(struct io_request *io_req, void *priv_data)
+{
+	(void)rw_vmsix_table((struct pci_vdev *)priv_data, io_req);
+	return 0;
 }
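The read-only mask scheme above works byte-wise: the mask bytes are copied over an all-ones default, so an access that lands entirely on read-only bytes leaves ro_mask == ~0U and is dropped, while a partially writable access merges old and new bits. A self-contained sketch of that merge on a little-endian machine (same mask table as the commit; the offset and register values are made up, and plain memcpy stands in for memcpy_s):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Byte 3 keeps bits 6-7 writable: Function Mask and MSI-X Enable. */
	static const uint8_t msix_ro_mask[12] = {
			0xff, 0xff, 0xff, 0x3f,
			0xff, 0xff, 0xff, 0xff,
			0xff, 0xff, 0xff, 0xff };

	uint32_t offset = 0U;		/* hypothetical offset into the capability */
	uint32_t bytes = 4U;		/* a DWORD write covering Message Control */
	uint32_t old = 0x00120011U;	/* current virtual register value */
	uint32_t val = 0xffffffffU;	/* guest tries to set every bit */

	uint32_t ro_mask = ~0U;
	memcpy(&ro_mask, &msix_ro_mask[offset], bytes);	/* ro_mask = 0x3fffffff */

	if (ro_mask != ~0U) {
		/* Keep read-only bits from 'old', take writable bits from 'val'. */
		uint32_t merged = (old & ro_mask) | (val & ~ro_mask);

		printf("merged = %#010x\n", (unsigned)merged);	/* 0xc0120011 */
	}
	return 0;
}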
@@ -360,12 +360,12 @@ static void vpci_init_pt_dev(struct pci_vdev *vdev)
 	vdev->user = vdev;
 
 	/*
-	 * Here init_vdev_pt() needs to be called after init_vmsix() for the following reason:
+	 * Here init_vdev_pt() needs to be called after init_vmsix_pt() for the following reason:
 	 * init_vdev_pt() will indirectly call has_msix_cap(), which
-	 * requires init_vmsix() to be called first.
+	 * requires init_vmsix_pt() to be called first.
 	 */
 	init_vmsi(vdev);
-	init_vmsix(vdev);
+	init_vmsix_pt(vdev);
 	init_vsriov(vdev);
 	init_vdev_pt(vdev, false);
 
@@ -376,7 +376,7 @@ static void vpci_deinit_pt_dev(struct pci_vdev *vdev)
 {
 	deinit_vdev_pt(vdev);
 	remove_vdev_pt_iommu_domain(vdev);
-	deinit_vmsix(vdev);
+	deinit_vmsix_pt(vdev);
 	deinit_vmsi(vdev);
 
 	vdev->user = NULL;
@@ -486,7 +486,7 @@ static int32_t write_pt_dev_cfg(struct pci_vdev *vdev, uint32_t offset,
 		if (vdev->msix.is_vmsix_on_msi) {
 			write_vmsix_cap_reg_on_msi(vdev, offset, bytes, val);
 		} else {
-			write_vmsix_cap_reg(vdev, offset, bytes, val);
+			write_pt_vmsix_cap_reg(vdev, offset, bytes, val);
 		}
 	} else if (sriovcap_access(vdev, offset)) {
 		write_sriov_cap_reg(vdev, offset, bytes, val);
@@ -511,10 +511,8 @@ static int32_t read_pt_dev_cfg(const struct pci_vdev *vdev, uint32_t offset,
 
 	if (cfg_header_access(offset)) {
 		read_cfg_header(vdev, offset, bytes, val);
-	} else if (msicap_access(vdev, offset)) {
-		read_vmsi_cap_reg(vdev, offset, bytes, val);
-	} else if (msixcap_access(vdev, offset)) {
-		read_vmsix_cap_reg(vdev, offset, bytes, val);
+	} else if (msicap_access(vdev, offset) || msixcap_access(vdev, offset)) {
+		*val = pci_vdev_read_vcfg(vdev, offset, bytes);
 	} else if (sriovcap_access(vdev, offset)) {
 		read_sriov_cap_reg(vdev, offset, bytes, val);
 	} else {
@@ -140,14 +140,14 @@ void vdev_pt_write_vbar(struct pci_vdev *vdev, uint32_t idx, uint32_t val);
 void vdev_pt_map_msix(struct pci_vdev *vdev, bool hold_lock);
 
 void init_vmsi(struct pci_vdev *vdev);
-void read_vmsi_cap_reg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val);
 void write_vmsi_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
 void deinit_vmsi(const struct pci_vdev *vdev);
 
-void init_vmsix(struct pci_vdev *vdev);
-void read_vmsix_cap_reg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val);
-void write_vmsix_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
-void deinit_vmsix(struct pci_vdev *vdev);
+void init_vmsix_pt(struct pci_vdev *vdev);
+bool write_vmsix_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
+void write_pt_vmsix_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
+uint32_t rw_vmsix_table(struct pci_vdev *vdev, struct io_request *io_req);
+void deinit_vmsix_pt(struct pci_vdev *vdev);
 
 void init_vmsix_on_msi(struct pci_vdev *vdev);
 void write_vmsix_cap_reg_on_msi(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);