mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-06-23 14:07:42 +00:00
hv: dm: fix "Procedure has more than one exit point"
IEC 61508 and ISO 26262 standards highly recommend the single-exit rule. Reduce the count of "return entries". Fix the violations that match the cases listed below: 1. The function has 2 return entries. 2. The first return entry is used to return an error code after checking whether a variable is valid. Fix the violations using an "if/else" structure. V1->V2: make the return value type match int32_t. Tracked-On: #861 Signed-off-by: Huihuang Shi <huihuang.shi@intel.com> Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
parent
ab3d7c87fd
commit
8dfb9bd9c0
@ -86,26 +86,24 @@ vioapic_set_pinstate(struct acrn_vioapic *vioapic, uint16_t pin, uint32_t level)
|
||||
uint32_t old_lvl;
|
||||
union ioapic_rte rte;
|
||||
|
||||
if (pin >= REDIR_ENTRIES_HW) {
|
||||
return;
|
||||
}
|
||||
|
||||
rte = vioapic->rtbl[pin];
|
||||
old_lvl = (uint32_t)bitmap_test(pin & 0x3FU, &vioapic->pin_state[pin >> 6U]);
|
||||
if (level == 0U) {
|
||||
/* clear pin_state and deliver interrupt according to polarity */
|
||||
bitmap_clear_nolock(pin & 0x3FU,
|
||||
&vioapic->pin_state[pin >> 6U]);
|
||||
if (((rte.full & IOAPIC_RTE_INTPOL) != 0UL)
|
||||
&& old_lvl != level) {
|
||||
vioapic_send_intr(vioapic, pin);
|
||||
}
|
||||
} else {
|
||||
/* set pin_state and deliver intrrupt according to polarity */
|
||||
bitmap_set_nolock(pin & 0x3FU, &vioapic->pin_state[pin >> 6U]);
|
||||
if (((rte.full & IOAPIC_RTE_INTPOL) == 0UL)
|
||||
&& old_lvl != level) {
|
||||
vioapic_send_intr(vioapic, pin);
|
||||
if (pin < REDIR_ENTRIES_HW) {
|
||||
rte = vioapic->rtbl[pin];
|
||||
old_lvl = (uint32_t)bitmap_test(pin & 0x3FU, &vioapic->pin_state[pin >> 6U]);
|
||||
if (level == 0U) {
|
||||
/* clear pin_state and deliver interrupt according to polarity */
|
||||
bitmap_clear_nolock(pin & 0x3FU,
|
||||
&vioapic->pin_state[pin >> 6U]);
|
||||
if (((rte.full & IOAPIC_RTE_INTPOL) != 0UL)
|
||||
&& old_lvl != level) {
|
||||
vioapic_send_intr(vioapic, pin);
|
||||
}
|
||||
} else {
|
||||
/* set pin_state and deliver intrrupt according to polarity */
|
||||
bitmap_set_nolock(pin & 0x3FU, &vioapic->pin_state[pin >> 6U]);
|
||||
if (((rte.full & IOAPIC_RTE_INTPOL) == 0UL)
|
||||
&& old_lvl != level) {
|
||||
vioapic_send_intr(vioapic, pin);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -260,16 +258,18 @@ static inline bool vioapic_need_intr(const struct acrn_vioapic *vioapic, uint16_
|
||||
{
|
||||
uint32_t lvl;
|
||||
union ioapic_rte rte;
|
||||
bool ret;
|
||||
|
||||
if (pin >= REDIR_ENTRIES_HW) {
|
||||
return false;
|
||||
ret = false;
|
||||
} else {
|
||||
rte = vioapic->rtbl[pin];
|
||||
lvl = (uint32_t)bitmap_test(pin & 0x3FU, &vioapic->pin_state[pin >> 6U]);
|
||||
ret = !!((((rte.full & IOAPIC_RTE_INTPOL) != 0UL) && lvl == 0U) ||
|
||||
(((rte.full & IOAPIC_RTE_INTPOL) == 0UL) && lvl != 0U));
|
||||
}
|
||||
|
||||
rte = vioapic->rtbl[pin];
|
||||
lvl = (uint32_t)bitmap_test(pin & 0x3FU, &vioapic->pin_state[pin >> 6U]);
|
||||
|
||||
return !!((((rte.full & IOAPIC_RTE_INTPOL) != 0UL) && lvl == 0U) ||
|
||||
(((rte.full & IOAPIC_RTE_INTPOL) == 0UL) && lvl != 0U));
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -525,11 +525,15 @@ vioapic_init(struct acrn_vm *vm)
|
||||
uint32_t
|
||||
vioapic_pincount(const struct acrn_vm *vm)
|
||||
{
|
||||
uint32_t ret;
|
||||
|
||||
if (is_vm0(vm)) {
|
||||
return REDIR_ENTRIES_HW;
|
||||
ret = REDIR_ENTRIES_HW;
|
||||
} else {
|
||||
return VIOAPIC_RTE_NUM;
|
||||
ret = VIOAPIC_RTE_NUM;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int vioapic_mmio_access_handler(struct io_request *io_req, void *handler_private_data)
|
||||
|
@ -32,14 +32,17 @@
|
||||
|
||||
static inline bool msicap_access(struct pci_vdev *vdev, uint32_t offset)
|
||||
{
|
||||
bool ret;
|
||||
if (vdev->msi.capoff == 0U) {
|
||||
return 0;
|
||||
ret = 0;
|
||||
} else {
|
||||
ret = in_range(offset, vdev->msi.capoff, vdev->msi.caplen);
|
||||
}
|
||||
|
||||
return in_range(offset, vdev->msi.capoff, vdev->msi.caplen);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int vmsi_remap(struct pci_vdev *vdev, bool enable)
|
||||
static int32_t vmsi_remap(struct pci_vdev *vdev, bool enable)
|
||||
{
|
||||
struct ptdev_msi_info info;
|
||||
union pci_bdf pbdf = vdev->pdev.bdf;
|
||||
@ -47,7 +50,7 @@ static int vmsi_remap(struct pci_vdev *vdev, bool enable)
|
||||
uint32_t capoff = vdev->msi.capoff;
|
||||
uint32_t msgctrl, msgdata;
|
||||
uint32_t addrlo, addrhi;
|
||||
int ret;
|
||||
int32_t ret;
|
||||
|
||||
/* Disable MSI during configuration */
|
||||
msgctrl = pci_vdev_read_cfg(vdev, capoff + PCIR_MSI_CTRL, 2U);
|
||||
@ -76,40 +79,41 @@ static int vmsi_remap(struct pci_vdev *vdev, bool enable)
|
||||
}
|
||||
|
||||
ret = ptdev_msix_remap(vm, vdev->vbdf.value, 0U, &info);
|
||||
if (ret != 0) {
|
||||
return ret;
|
||||
}
|
||||
if (ret == 0) {
|
||||
/* Update MSI Capability structure to physical device */
|
||||
pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_ADDR, 0x4U, (uint32_t)info.pmsi_addr);
|
||||
if ((msgctrl & PCIM_MSICTRL_64BIT) != 0U) {
|
||||
pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_ADDR_HIGH, 0x4U, (uint32_t)(info.pmsi_addr >> 32U));
|
||||
pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_DATA_64BIT, 0x2U, (uint16_t)info.pmsi_data);
|
||||
} else {
|
||||
pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_DATA, 0x2U, (uint16_t)info.pmsi_data);
|
||||
}
|
||||
|
||||
/* Update MSI Capability structure to physical device */
|
||||
pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_ADDR, 0x4U, (uint32_t)info.pmsi_addr);
|
||||
if ((msgctrl & PCIM_MSICTRL_64BIT) != 0U) {
|
||||
pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_ADDR_HIGH, 0x4U, (uint32_t)(info.pmsi_addr >> 32U));
|
||||
pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_DATA_64BIT, 0x2U, (uint16_t)info.pmsi_data);
|
||||
} else {
|
||||
pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_DATA, 0x2U, (uint16_t)info.pmsi_data);
|
||||
}
|
||||
|
||||
/* If MSI Enable is being set, make sure INTxDIS bit is set */
|
||||
if (enable) {
|
||||
enable_disable_pci_intx(pbdf, false);
|
||||
pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_CTRL, 2U, msgctrl | PCIM_MSICTRL_MSI_ENABLE);
|
||||
/* If MSI Enable is being set, make sure INTxDIS bit is set */
|
||||
if (enable) {
|
||||
enable_disable_pci_intx(pbdf, false);
|
||||
pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_CTRL, 2U, msgctrl | PCIM_MSICTRL_MSI_ENABLE);
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int vmsi_cfgread(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val)
|
||||
static int32_t vmsi_cfgread(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val)
|
||||
{
|
||||
int32_t ret;
|
||||
/* For PIO access, we emulate Capability Structures only */
|
||||
if (msicap_access(vdev, offset)) {
|
||||
*val = pci_vdev_read_cfg(vdev, offset, bytes);
|
||||
return 0;
|
||||
ret = 0;
|
||||
} else {
|
||||
ret = -ENODEV;
|
||||
}
|
||||
|
||||
return -ENODEV;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int vmsi_cfgwrite(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val)
|
||||
static int32_t vmsi_cfgwrite(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val)
|
||||
{
|
||||
bool message_changed = false;
|
||||
bool enable;
|
||||
@ -213,7 +217,7 @@ void populate_msi_struct(struct pci_vdev *vdev)
|
||||
}
|
||||
}
|
||||
|
||||
static int vmsi_deinit(struct pci_vdev *vdev)
|
||||
static int32_t vmsi_deinit(struct pci_vdev *vdev)
|
||||
{
|
||||
if (vdev->msi.capoff != 0U) {
|
||||
ptdev_remove_msix_remapping(vdev->vpci->vm, vdev->vbdf.value, 1U);
|
||||
|
@ -59,20 +59,19 @@ static void sharing_mode_cfgread(__unused struct vpci *vpci, union pci_bdf bdf,
|
||||
/* vdev == NULL: Could be hit for PCI enumeration from guests */
|
||||
if ((vdev == NULL) || ((bytes != 1U) && (bytes != 2U) && (bytes != 4U))) {
|
||||
*val = ~0U;
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0U; (i < vdev->nr_ops) && !handled; i++) {
|
||||
if (vdev->ops[i].cfgread != NULL) {
|
||||
if (vdev->ops[i].cfgread(vdev, offset, bytes, val) == 0) {
|
||||
handled = true;
|
||||
} else {
|
||||
for (i = 0U; (i < vdev->nr_ops) && !handled; i++) {
|
||||
if (vdev->ops[i].cfgread != NULL) {
|
||||
if (vdev->ops[i].cfgread(vdev, offset, bytes, val) == 0) {
|
||||
handled = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Not handled by any handlers. Passthru to physical device */
|
||||
if (!handled) {
|
||||
*val = pci_pdev_read_cfg(vdev->pdev.bdf, offset, bytes);
|
||||
/* Not handled by any handlers. Passthru to physical device */
|
||||
if (!handled) {
|
||||
*val = pci_pdev_read_cfg(vdev->pdev.bdf, offset, bytes);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -83,23 +82,21 @@ static void sharing_mode_cfgwrite(__unused struct vpci *vpci, union pci_bdf bdf,
|
||||
bool handled = false;
|
||||
uint32_t i;
|
||||
|
||||
if ((bytes != 1U) && (bytes != 2U) && (bytes != 4U)) {
|
||||
return;
|
||||
}
|
||||
|
||||
vdev = sharing_mode_find_vdev(bdf);
|
||||
if (vdev != NULL) {
|
||||
for (i = 0U; (i < vdev->nr_ops) && !handled; i++) {
|
||||
if (vdev->ops[i].cfgwrite != NULL) {
|
||||
if (vdev->ops[i].cfgwrite(vdev, offset, bytes, val) == 0) {
|
||||
handled = true;
|
||||
if ((bytes == 1U) || (bytes == 2U) || (bytes == 4U)) {
|
||||
vdev = sharing_mode_find_vdev(bdf);
|
||||
if (vdev != NULL) {
|
||||
for (i = 0U; (i < vdev->nr_ops) && !handled; i++) {
|
||||
if (vdev->ops[i].cfgwrite != NULL) {
|
||||
if (vdev->ops[i].cfgwrite(vdev, offset, bytes, val) == 0) {
|
||||
handled = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Not handled by any handlers. Passthru to physical device */
|
||||
if (!handled) {
|
||||
pci_pdev_write_cfg(vdev->pdev.bdf, offset, bytes, val);
|
||||
/* Not handled by any handlers. Passthru to physical device */
|
||||
if (!handled) {
|
||||
pci_pdev_write_cfg(vdev->pdev.bdf, offset, bytes, val);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -108,17 +105,17 @@ static struct pci_vdev *alloc_pci_vdev(struct acrn_vm *vm, union pci_bdf bdf)
|
||||
{
|
||||
struct pci_vdev *vdev;
|
||||
|
||||
if (num_pci_vdev >= CONFIG_MAX_PCI_DEV_NUM) {
|
||||
return NULL;
|
||||
if (num_pci_vdev < CONFIG_MAX_PCI_DEV_NUM) {
|
||||
vdev = &sharing_mode_vdev_array[num_pci_vdev++];
|
||||
|
||||
/* vbdf equals to pbdf otherwise remapped */
|
||||
vdev->vbdf = bdf;
|
||||
vdev->vpci = &vm->vpci;
|
||||
vdev->pdev.bdf = bdf;
|
||||
} else {
|
||||
vdev = NULL;
|
||||
}
|
||||
|
||||
vdev = &sharing_mode_vdev_array[num_pci_vdev++];
|
||||
|
||||
/* vbdf equals to pbdf otherwise remapped */
|
||||
vdev->vbdf = bdf;
|
||||
vdev->vpci = &vm->vpci;
|
||||
vdev->pdev.bdf = bdf;
|
||||
|
||||
return vdev;
|
||||
}
|
||||
|
||||
@ -133,36 +130,38 @@ static void enumerate_pci_dev(uint16_t pbdf, void *cb_data)
|
||||
}
|
||||
}
|
||||
|
||||
static int sharing_mode_vpci_init(struct acrn_vm *vm)
|
||||
static int32_t sharing_mode_vpci_init(struct acrn_vm *vm)
|
||||
{
|
||||
struct pci_vdev *vdev;
|
||||
uint32_t i, j;
|
||||
int32_t ret;
|
||||
|
||||
/*
|
||||
* Only setup IO bitmap for SOS.
|
||||
* IO/MMIO requests from non-vm0 guests will be injected to device model.
|
||||
*/
|
||||
if (!is_vm0(vm)) {
|
||||
return -ENODEV;
|
||||
}
|
||||
ret = -ENODEV;
|
||||
} else {
|
||||
/* Initialize PCI vdev array */
|
||||
num_pci_vdev = 0U;
|
||||
(void)memset((void *)sharing_mode_vdev_array, 0U, sizeof(sharing_mode_vdev_array));
|
||||
|
||||
/* Initialize PCI vdev array */
|
||||
num_pci_vdev = 0U;
|
||||
(void)memset((void *)sharing_mode_vdev_array, 0U, sizeof(sharing_mode_vdev_array));
|
||||
/* build up vdev array for vm0 */
|
||||
pci_scan_bus(enumerate_pci_dev, (void *)vm);
|
||||
|
||||
/* build up vdev array for vm0 */
|
||||
pci_scan_bus(enumerate_pci_dev, (void *)vm);
|
||||
|
||||
for (i = 0U; i < num_pci_vdev; i++) {
|
||||
vdev = &sharing_mode_vdev_array[i];
|
||||
for (j = 0U; j < vdev->nr_ops; j++) {
|
||||
if (vdev->ops[j].init != NULL) {
|
||||
(void)vdev->ops[j].init(vdev);
|
||||
for (i = 0U; i < num_pci_vdev; i++) {
|
||||
vdev = &sharing_mode_vdev_array[i];
|
||||
for (j = 0U; j < vdev->nr_ops; j++) {
|
||||
if (vdev->ops[j].init != NULL) {
|
||||
(void)vdev->ops[j].init(vdev);
|
||||
}
|
||||
}
|
||||
}
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void sharing_mode_vpci_deinit(__unused struct acrn_vm *vm)
|
||||
@ -170,15 +169,13 @@ static void sharing_mode_vpci_deinit(__unused struct acrn_vm *vm)
|
||||
struct pci_vdev *vdev;
|
||||
uint32_t i, j;
|
||||
|
||||
if (!is_vm0(vm)) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0U; i < num_pci_vdev; i++) {
|
||||
vdev = &sharing_mode_vdev_array[i];
|
||||
for (j = 0U; j < vdev->nr_ops; j++) {
|
||||
if (vdev->ops[j].deinit != NULL) {
|
||||
(void)vdev->ops[j].deinit(vdev);
|
||||
if (is_vm0(vm)) {
|
||||
for (i = 0U; i < num_pci_vdev; i++) {
|
||||
vdev = &sharing_mode_vdev_array[i];
|
||||
for (j = 0U; j < vdev->nr_ops; j++) {
|
||||
if (vdev->ops[j].deinit != NULL) {
|
||||
(void)vdev->ops[j].deinit(vdev);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -188,10 +185,9 @@ void add_vdev_handler(struct pci_vdev *vdev, struct pci_vdev_ops *ops)
|
||||
{
|
||||
if (vdev->nr_ops >= (MAX_VPCI_DEV_OPS - 1U)) {
|
||||
pr_err("%s, adding too many handlers", __func__);
|
||||
return;
|
||||
} else {
|
||||
vdev->ops[vdev->nr_ops++] = *ops;
|
||||
}
|
||||
|
||||
vdev->ops[vdev->nr_ops++] = *ops;
|
||||
}
|
||||
|
||||
struct vpci_ops sharing_mode_vpci_ops = {
|
||||
@ -209,13 +205,12 @@ void vpci_set_ptdev_intr_info(struct acrn_vm *target_vm, uint16_t vbdf, uint16_t
|
||||
if (vdev == NULL) {
|
||||
pr_err("%s, can't find PCI device for vm%d, vbdf (0x%x) pbdf (0x%x)", __func__,
|
||||
target_vm->vm_id, vbdf, pbdf);
|
||||
return;
|
||||
} else {
|
||||
/* UOS may do BDF mapping */
|
||||
vdev->vpci = &target_vm->vpci;
|
||||
vdev->vbdf.value = vbdf;
|
||||
vdev->pdev.bdf.value = pbdf;
|
||||
}
|
||||
|
||||
/* UOS may do BDF mapping */
|
||||
vdev->vpci = &target_vm->vpci;
|
||||
vdev->vbdf.value = vbdf;
|
||||
vdev->pdev.bdf.value = pbdf;
|
||||
}
|
||||
|
||||
void vpci_reset_ptdev_intr_info(struct acrn_vm *target_vm, uint16_t vbdf, uint16_t pbdf)
|
||||
@ -227,12 +222,11 @@ void vpci_reset_ptdev_intr_info(struct acrn_vm *target_vm, uint16_t vbdf, uint16
|
||||
if (vdev == NULL) {
|
||||
pr_err("%s, can't find PCI device for vm%d, vbdf (0x%x) pbdf (0x%x)", __func__,
|
||||
target_vm->vm_id, vbdf, pbdf);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Return this PCI device to SOS */
|
||||
if (vdev->vpci->vm == target_vm) {
|
||||
vm = get_vm_from_vmid(0U);
|
||||
vdev->vpci = &vm->vpci;
|
||||
} else {
|
||||
/* Return this PCI device to SOS */
|
||||
if (vdev->vpci->vm == target_vm) {
|
||||
vm = get_vm_from_vmid(0U);
|
||||
vdev->vpci = &vm->vpci;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -147,12 +147,16 @@ static inline uint32_t pci_bar_offset(uint32_t idx)
|
||||
|
||||
static inline bool pci_bar_access(uint32_t offset)
|
||||
{
|
||||
bool ret;
|
||||
|
||||
if ((offset >= pci_bar_offset(0U))
|
||||
&& (offset < pci_bar_offset(PCI_BAR_COUNT))) {
|
||||
return true;
|
||||
ret = true;
|
||||
} else {
|
||||
return false;
|
||||
ret = false;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline uint8_t pci_bus(uint16_t bdf)
|
||||
|
@ -34,14 +34,14 @@
|
||||
|
||||
struct pci_vdev;
|
||||
struct pci_vdev_ops {
|
||||
int (*init)(struct pci_vdev *vdev);
|
||||
int32_t (*init)(struct pci_vdev *vdev);
|
||||
|
||||
int (*deinit)(struct pci_vdev *vdev);
|
||||
int32_t (*deinit)(struct pci_vdev *vdev);
|
||||
|
||||
int (*cfgwrite)(struct pci_vdev *vdev, uint32_t offset,
|
||||
uint32_t bytes, uint32_t val);
|
||||
|
||||
int (*cfgread)(struct pci_vdev *vdev, uint32_t offset,
|
||||
int32_t (*cfgread)(struct pci_vdev *vdev, uint32_t offset,
|
||||
uint32_t bytes, uint32_t *val);
|
||||
};
|
||||
|
||||
@ -125,7 +125,7 @@ struct pci_addr_info {
|
||||
};
|
||||
|
||||
struct vpci_ops {
|
||||
int (*init)(struct acrn_vm *vm);
|
||||
int32_t (*init)(struct acrn_vm *vm);
|
||||
void (*deinit)(struct acrn_vm *vm);
|
||||
void (*cfgread)(struct vpci *vpci, union pci_bdf vbdf, uint32_t offset,
|
||||
uint32_t bytes, uint32_t *val);
|
||||
|
Loading…
Reference in New Issue
Block a user