Mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2025-07-02 02:03:20 +00:00)

hv:fix violations of coding guideline C-ST-04
The coding guideline rule C-ST-04 requires that an 'if' statement followed by one or more 'else if' statements shall be terminated by an 'else' statement that contains either an appropriate action or a comment.

Tracked-On: #6776
Signed-off-by: Mingqiang Chi <mingqiang.chi@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>

This commit is contained in:
parent b7a99f4530
commit 3d5c3c4754
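
For illustration, here is a minimal, hypothetical sketch of the pattern C-ST-04 asks for; the function and values below are invented for the example and are not taken from the ACRN sources:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical example, not ACRN code: every if ... else if chain ends
 * with an else branch that carries either an action or an explanatory
 * comment, as C-ST-04 requires.
 */
static void report_cache_level(uint32_t level)
{
	if (level == 2U) {
		printf("L2 cache\n");
	} else if (level == 3U) {
		printf("L3 cache\n");
	} else {
		/* only L2 and L3 are of interest; other levels are ignored */
	}
}

int main(void)
{
	report_cache_level(2U);
	report_cache_level(5U);	/* takes the (commented) else branch */
	return 0;
}
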
@@ -212,6 +212,8 @@ static bool pm1ab_io_write(struct acrn_vcpu *vcpu, uint16_t addr, size_t width,
 				enter_s3(vm, v, 0U);
 			} else if (vm->pm.sx_state_data->s5_pkg.val_pm1a == val) {
 				enter_s5(vcpu, v, 0U);
+			} else {
+				/* other Sx value should be ignored */
 			}
 		}
 

@@ -226,6 +228,8 @@ static bool pm1ab_io_write(struct acrn_vcpu *vcpu, uint16_t addr, size_t width,
 				enter_s3(vm, pm1a_cnt_val, v);
 			} else if (vm->pm.sx_state_data->s5_pkg.val_pm1b == val) {
 				enter_s5(vcpu, pm1a_cnt_val, v);
+			} else {
+				/* other Sx value should be ignored */
 			}
 		} else {
 			/* the case broke ACPI spec */

@@ -396,10 +400,12 @@ void init_guest_pm(struct acrn_vm *vm)
 		if (vm_load_pm_s_state(vm) == 0) {
 			register_pm1ab_handler(vm);
 		}
-	} else if (is_postlaunched_vm(vm) && is_rt_vm(vm)) {
-		/* Intercept the virtual pm port for post launched RTVM */
-		register_rt_vm_pm1a_ctl_handler(vm);
-	} else if (is_prelaunched_vm(vm)) {
+	} else if (is_postlaunched_vm(vm)) {
+		if (is_rt_vm(vm)) {
+			/* Intercept the virtual pm port for post launched RTVM */
+			register_rt_vm_pm1a_ctl_handler(vm);
+		}
+	} else { /* prelaunched vm */
 		/* Intercept the virtual sleep control/status registers for pre-launched VM */
 		register_prelaunched_vm_sleep_handler(vm);
 	}

@@ -333,8 +333,10 @@ static void prepare_prelaunched_vm_memmap(struct acrn_vm *vm, const struct acrn_
 				base_hpa += entry->length;
 				remaining_hpa_size -= entry->length;
 			}
-		} else if (entry->type == E820_TYPE_RAM) {
-			pr_warn("%s: HPA size incorrectly configured in v820\n", __func__);
+		} else {
+			if (entry->type == E820_TYPE_RAM) {
+				pr_warn("%s: HPA size incorrectly configured in v820\n", __func__);
+			}
 		}
 
 		if ((remaining_hpa_size == 0UL) && (is_hpa1)) {

@@ -1072,8 +1072,10 @@ static int32_t iommu_attach_device(const struct iommu_domain *domain, uint8_t bu
 			iommu_flush_cache(context_entry, sizeof(struct dmar_entry));
 			ret = 0;
 		}
-	} else if (is_dmar_unit_ignored(dmar_unit)) {
-		ret = 0;
+	} else {
+		if (is_dmar_unit_ignored(dmar_unit)) {
+			ret = 0;
+		}
 	}
 
 	return ret;

@@ -1127,8 +1129,10 @@ static int32_t iommu_detach_device(const struct iommu_domain *domain, uint8_t bu
 			dmar_invalid_iotlb(dmar_unit, vmid_to_domainid(domain->vm_id), 0UL, 0U, false,
 				DMAR_IIRG_DOMAIN);
 		}
-	} else if (is_dmar_unit_ignored(dmar_unit)) {
-		ret = 0;
+	} else {
+		if (is_dmar_unit_ignored(dmar_unit)) {
+			ret = 0;
+		}
 	}
 
 	return ret;

@@ -86,6 +86,8 @@ int32_t init_multiboot_info(uint32_t *registers)
 			ret = 0;
 		}
 #endif
+	} else {
+		/* Currently there are only multiboot and multiboot2 */
 	}
 	return ret;
 }

@@ -175,6 +175,8 @@ void get_cache_shift(uint32_t *l2_shift, uint32_t *l3_shift)
 			*l2_shift = shift;
 		} else if (cache_level == 3U) {
 			*l3_shift = shift;
+		} else {
+			/* this api only for L2 & L3 cache */
 		}
 	}
 }

@@ -295,7 +295,8 @@ void vdev_pt_write_vbar(struct pci_vdev *vdev, uint32_t idx, uint32_t val)
 
 	if (is_pci_io_bar(vbar)) {
 		vpci_update_one_vbar(vdev, idx, val, vdev_pt_allow_io_vbar, vdev_pt_deny_io_vbar);
-	} else if (is_pci_mem_bar(vbar)) {
+	} else {
+		/* pci mem bar */
 		vpci_update_one_vbar(vdev, idx, val, vdev_pt_map_mem_vbar, vdev_pt_unmap_mem_vbar);
 	}
 }

@@ -151,17 +151,21 @@ static void pci_vdev_update_vbar_base(struct pci_vdev *vdev, uint32_t idx)
 				vdev->vbars[idx].base_hpa, lo & PCI_BASE_ADDRESS_IO_MASK);
 			base = 0UL;
 		}
-	} else if ((!is_pci_mem_bar_base_valid(vpci2vm(vdev->vpci), base))
+	} else {
+		if ((!is_pci_mem_bar_base_valid(vpci2vm(vdev->vpci), base))
 			|| (!mem_aligned_check(base, vdev->vbars[idx].size))) {
-		res = (base < (1UL << 32UL)) ? &(vdev->vpci->res32): &(vdev->vpci->res64);
-		/* VM tries to reprogram vbar address out of pci mmio bar window, it can be caused by:
-		 * 1. For Service VM, <board>.xml is misaligned with the actual native platform, and we get wrong mmio window.
-		 * 2. Malicious operation from VM, it tries to reprogram vbar address out of pci mmio bar window
-		 */
-		pr_err("%s reprogram PCI:%02x:%02x.%x BAR%d to addr:0x%lx,"
-			" which is out of mmio window[0x%lx - 0x%lx] or not aligned with size: 0x%lx",
-			__func__, vdev->bdf.bits.b, vdev->bdf.bits.d, vdev->bdf.bits.f, idx, base, res->start,
-			res->end, vdev->vbars[idx].size);
+			res = (base < (1UL << 32UL)) ? &(vdev->vpci->res32) : &(vdev->vpci->res64);
+			/* VM tries to reprogram vbar address out of pci mmio bar window, it can be caused by:
+			 * 1. For Service VM, <board>.xml is misaligned with the actual native platform,
+			 * and we get wrong mmio window.
+			 * 2. Malicious operation from VM, it tries to reprogram vbar address out of
+			 * pci mmio bar window
+			 */
+			pr_err("%s reprogram PCI:%02x:%02x.%x BAR%d to addr:0x%lx,"
+				" which is out of mmio window[0x%lx - 0x%lx] or not aligned with size: 0x%lx",
+				__func__, vdev->bdf.bits.b, vdev->bdf.bits.d, vdev->bdf.bits.f, idx, base,
+				res->start, res->end, vdev->vbars[idx].size);
+		}
 	}
 }
 

@@ -75,6 +75,8 @@ static void map_vmcs9900_vbar(struct pci_vdev *vdev, uint32_t idx)
 			(vbar->base_gpa + vbar->size), vdev, false);
 		ept_del_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, vbar->base_gpa, vbar->size);
 		vdev->msix.mmio_gpa = vbar->base_gpa;
+	} else {
+		/* No action required. */
 	}
 
 }

@@ -90,8 +90,10 @@ uint32_t rw_vmsix_table(struct pci_vdev *vdev, struct io_request *io_req)
 				(void)memcpy_s((void *)entry + entry_offset, (size_t)mmio->size,
 						&mmio->value, (size_t)mmio->size);
 			}
-		} else if (mmio->direction == ACRN_IOREQ_DIR_READ) {
-			mmio->value = 0UL;
+		} else {
+			if (mmio->direction == ACRN_IOREQ_DIR_READ) {
+				mmio->value = 0UL;
+			}
 		}
 	} else {
 		pr_err("%s, Only DWORD and QWORD are permitted", __func__);

@@ -766,6 +766,8 @@ static void pci_enumerate_ext_cap(struct pci_pdev *pdev)
 				pr_err("%s: Do NOT enable PTM on [%x:%x.%x].\n", __func__,
 					pdev->bdf.bits.b, pdev->bdf.bits.d, pdev->bdf.bits.f);
 			}
+		} else {
+			/* reserved for future use */
 		}
 
 		pre_pos = pos;