mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-06-25 23:13:26 +00:00
hv: vpci: fix "Procedure has more than one exit point"
The IEC 61508 and ISO 26262 functional-safety standards strongly recommend that each procedure have a single point of exit. Tracked-On: #861 Signed-off-by: Zide Chen <zide.chen@intel.com> Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
parent
c547e9cfb4
commit
3515ca1e65
@ -118,6 +118,7 @@ static int32_t vmsi_cfgwrite(struct pci_vdev *vdev, uint32_t offset, uint32_t by
|
||||
bool message_changed = false;
|
||||
bool enable;
|
||||
uint32_t msgctrl;
|
||||
int32_t ret = -ENODEV;
|
||||
|
||||
/* Writing MSI Capability Structure */
|
||||
if (msicap_access(vdev, offset)) {
|
||||
@ -144,10 +145,10 @@ static int32_t vmsi_cfgwrite(struct pci_vdev *vdev, uint32_t offset, uint32_t by
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
return -ENODEV;
|
||||
return ret;
|
||||
}
|
||||
|
||||
void populate_msi_struct(struct pci_vdev *vdev)
|
||||
@ -158,62 +159,60 @@ void populate_msi_struct(struct pci_vdev *vdev)
|
||||
union pci_bdf pbdf = vdev->pdev.bdf;
|
||||
|
||||
/* Has new Capabilities list? */
|
||||
if ((pci_pdev_read_cfg(pbdf, PCIR_STATUS, 2U) & PCIM_STATUS_CAPPRESENT) == 0U) {
|
||||
return;
|
||||
}
|
||||
if ((pci_pdev_read_cfg(pbdf, PCIR_STATUS, 2U) & PCIM_STATUS_CAPPRESENT) != 0U) {
|
||||
ptr = (uint8_t)pci_pdev_read_cfg(pbdf, PCIR_CAP_PTR, 1U);
|
||||
while ((ptr != 0U) && (ptr != 0xFFU)) {
|
||||
cap = (uint8_t)pci_pdev_read_cfg(pbdf, ptr + PCICAP_ID, 1U);
|
||||
|
||||
ptr = (uint8_t)pci_pdev_read_cfg(pbdf, PCIR_CAP_PTR, 1U);
|
||||
while ((ptr != 0U) && (ptr != 0xFFU)) {
|
||||
cap = (uint8_t)pci_pdev_read_cfg(pbdf, ptr + PCICAP_ID, 1U);
|
||||
/* Ignore all other Capability IDs for now */
|
||||
if ((cap == PCIY_MSI) || (cap == PCIY_MSIX)) {
|
||||
offset = ptr;
|
||||
if (cap == PCIY_MSI) {
|
||||
vdev->msi.capoff = offset;
|
||||
msgctrl = pci_pdev_read_cfg(pbdf, offset + PCIR_MSI_CTRL, 2U);
|
||||
|
||||
/* Ignore all other Capability IDs for now */
|
||||
if ((cap == PCIY_MSI) || (cap == PCIY_MSIX)) {
|
||||
offset = ptr;
|
||||
if (cap == PCIY_MSI) {
|
||||
vdev->msi.capoff = offset;
|
||||
msgctrl = pci_pdev_read_cfg(pbdf, offset + PCIR_MSI_CTRL, 2U);
|
||||
|
||||
/*
|
||||
* Ignore the 'mask' and 'pending' bits in the MSI capability
|
||||
* (msgctrl & PCIM_MSICTRL_VECTOR).
|
||||
* We'll let the guest manipulate them directly.
|
||||
*/
|
||||
len = ((msgctrl & PCIM_MSICTRL_64BIT) != 0U) ? 14U : 10U;
|
||||
vdev->msi.caplen = len;
|
||||
|
||||
/* Assign MSI handler for configuration read and write */
|
||||
add_vdev_handler(vdev, &pci_ops_vdev_msi);
|
||||
} else {
|
||||
vdev->msix.capoff = offset;
|
||||
vdev->msix.caplen = MSIX_CAPLEN;
|
||||
len = vdev->msix.caplen;
|
||||
|
||||
/* Assign MSI-X handler for configuration read and write */
|
||||
add_vdev_handler(vdev, &pci_ops_vdev_msix);
|
||||
}
|
||||
|
||||
/* Copy MSI/MSI-X capability struct into virtual device */
|
||||
while (len > 0U) {
|
||||
bytes = (len >= 4U) ? 4U : len;
|
||||
val = pci_pdev_read_cfg(pbdf, offset, bytes);
|
||||
|
||||
if ((cap == PCIY_MSI) && (offset == vdev->msi.capoff)) {
|
||||
/*
|
||||
* Don't support multiple vector for now,
|
||||
* Force Multiple Message Enable and Multiple Message
|
||||
* Capable to 0
|
||||
* Ignore the 'mask' and 'pending' bits in the MSI capability
|
||||
* (msgctrl & PCIM_MSICTRL_VECTOR).
|
||||
* We'll let the guest manipulate them directly.
|
||||
*/
|
||||
val &= ~((uint32_t)PCIM_MSICTRL_MMC_MASK << 16U);
|
||||
val &= ~((uint32_t)PCIM_MSICTRL_MME_MASK << 16U);
|
||||
len = ((msgctrl & PCIM_MSICTRL_64BIT) != 0U) ? 14U : 10U;
|
||||
vdev->msi.caplen = len;
|
||||
|
||||
/* Assign MSI handler for configuration read and write */
|
||||
add_vdev_handler(vdev, &pci_ops_vdev_msi);
|
||||
} else {
|
||||
vdev->msix.capoff = offset;
|
||||
vdev->msix.caplen = MSIX_CAPLEN;
|
||||
len = vdev->msix.caplen;
|
||||
|
||||
/* Assign MSI-X handler for configuration read and write */
|
||||
add_vdev_handler(vdev, &pci_ops_vdev_msix);
|
||||
}
|
||||
|
||||
pci_vdev_write_cfg(vdev, offset, bytes, val);
|
||||
len -= bytes;
|
||||
offset += bytes;
|
||||
}
|
||||
}
|
||||
/* Copy MSI/MSI-X capability struct into virtual device */
|
||||
while (len > 0U) {
|
||||
bytes = (len >= 4U) ? 4U : len;
|
||||
val = pci_pdev_read_cfg(pbdf, offset, bytes);
|
||||
|
||||
ptr = (uint8_t)pci_pdev_read_cfg(pbdf, ptr + PCICAP_NEXTPTR, 1U);
|
||||
if ((cap == PCIY_MSI) && (offset == vdev->msi.capoff)) {
|
||||
/*
|
||||
* Don't support multiple vector for now,
|
||||
* Force Multiple Message Enable and Multiple Message
|
||||
* Capable to 0
|
||||
*/
|
||||
val &= ~((uint32_t)PCIM_MSICTRL_MMC_MASK << 16U);
|
||||
val &= ~((uint32_t)PCIM_MSICTRL_MME_MASK << 16U);
|
||||
}
|
||||
|
||||
pci_vdev_write_cfg(vdev, offset, bytes, val);
|
||||
len -= bytes;
|
||||
offset += bytes;
|
||||
}
|
||||
}
|
||||
|
||||
ptr = (uint8_t)pci_pdev_read_cfg(pbdf, ptr + PCICAP_NEXTPTR, 1U);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -99,7 +99,7 @@ static inline void enable_disable_msix(struct pci_vdev *vdev, bool enable)
|
||||
/*
 * Remap every MSI-X table entry of 'vdev' for the given enable state.
 *
 * MSI-X is disabled for the whole duration of the reconfiguration and is
 * only (re)enabled when every table entry was remapped successfully.
 *
 * @param vdev   the virtual PCI device whose MSI-X table is remapped
 * @param enable true to enable MSI-X delivery after remapping,
 *               false to leave it disabled
 *
 * @return 0 on success, or the first non-zero error code returned by
 *         vmsix_remap_entry() (single exit point per IEC 61508/ISO 26262).
 */
static int32_t vmsix_remap(struct pci_vdev *vdev, bool enable)
{
	uint32_t index;
	int32_t ret = 0;

	/* disable MSI-X during configuration */
	enable_disable_msix(vdev, false);

	for (index = 0U; index < vdev->msix.table_count; index++) {
		ret = vmsix_remap_entry(vdev, index, enable);
		if (ret != 0) {
			/* stop on first failure; 'ret' carries the error out */
			break;
		}
	}

	/* Only touch INTx and the MSI-X enable bit if all entries remapped */
	if (ret == 0) {
		/* If MSI-X Enable is being set, make sure INTxDIS bit is set */
		if (enable) {
			enable_disable_pci_intx(vdev->pdev.bdf, false);
		}
		enable_disable_msix(vdev, enable);
	}

	return ret;
}
|
||||
|
||||
/* Do MSI-X remap for one MSI-X table entry only */
|
||||
@ -263,6 +265,7 @@ static int32_t vmsix_table_mmio_access_handler(struct io_request *io_req, void *
|
||||
{
|
||||
struct mmio_request *mmio = &io_req->reqs.mmio;
|
||||
struct pci_vdev *vdev;
|
||||
int32_t ret = 0;
|
||||
uint64_t offset;
|
||||
uint64_t hva;
|
||||
|
||||
@ -277,30 +280,30 @@ static int32_t vmsix_table_mmio_access_handler(struct io_request *io_req, void *
|
||||
/* Only DWORD and QWORD are permitted */
|
||||
if ((mmio->size != 4U) && (mmio->size != 8U)) {
|
||||
pr_err("%s, Only DWORD and QWORD are permitted", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
stac();
|
||||
/* MSI-X PBA and Capability Table could be in the same range */
|
||||
if (mmio->direction == REQUEST_READ) {
|
||||
/* mmio->size is either 4U or 8U */
|
||||
if (mmio->size == 4U) {
|
||||
mmio->value = (uint64_t)mmio_read32((const void *)hva);
|
||||
} else {
|
||||
mmio->value = mmio_read64((const void *)hva);
|
||||
}
|
||||
ret = -EINVAL;
|
||||
} else {
|
||||
/* mmio->size is either 4U or 8U */
|
||||
if (mmio->size == 4U) {
|
||||
mmio_write32((uint32_t)(mmio->value), (void *)hva);
|
||||
stac();
|
||||
/* MSI-X PBA and Capability Table could be in the same range */
|
||||
if (mmio->direction == REQUEST_READ) {
|
||||
/* mmio->size is either 4U or 8U */
|
||||
if (mmio->size == 4U) {
|
||||
mmio->value = (uint64_t)mmio_read32((const void *)hva);
|
||||
} else {
|
||||
mmio->value = mmio_read64((const void *)hva);
|
||||
}
|
||||
} else {
|
||||
mmio_write64(mmio->value, (void *)hva);
|
||||
/* mmio->size is either 4U or 8U */
|
||||
if (mmio->size == 4U) {
|
||||
mmio_write32((uint32_t)(mmio->value), (void *)hva);
|
||||
} else {
|
||||
mmio_write64(mmio->value, (void *)hva);
|
||||
}
|
||||
}
|
||||
clac();
|
||||
}
|
||||
clac();
|
||||
}
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void decode_msix_table_bar(struct pci_vdev *vdev)
|
||||
|
@ -35,16 +35,17 @@ static struct pci_vdev sharing_mode_vdev_array[CONFIG_MAX_PCI_DEV_NUM];
|
||||
|
||||
struct pci_vdev *sharing_mode_find_vdev(union pci_bdf pbdf)
|
||||
{
|
||||
struct pci_vdev *vdev = NULL;
|
||||
uint32_t i;
|
||||
|
||||
/* in VM0, it uses phys BDF */
|
||||
for (i = 0U; i < num_pci_vdev; i++) {
|
||||
if (sharing_mode_vdev_array[i].pdev.bdf.value == pbdf.value) {
|
||||
return &sharing_mode_vdev_array[i];
|
||||
vdev = &sharing_mode_vdev_array[i];
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
return vdev;
|
||||
}
|
||||
|
||||
static void sharing_mode_cfgread(__unused struct acrn_vpci *vpci, union pci_bdf bdf,
|
||||
|
Loading…
Reference in New Issue
Block a user