hv: fix integer violations

- Fix the integer violations related to the following rules (a short
  sketch follows this list):
  1. The operands to shift operations (<<, >>) shall be unsigned
     integers.
  2. The operands to bit operations (&, |, ~) shall be unsigned
     integers.

- Replace 12U with CPU_PAGE_SHIFT where it is used as an address shift.
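
For illustration only (not part of this commit), a minimal sketch of what
the two fixes look like in practice. The function and variable names below
are made up, and CPU_PAGE_SHIFT is assumed to expand to 12U as in the
hypervisor headers:

    #include <stdint.h>

    #define CPU_PAGE_SHIFT  12U   /* assumed value of the existing hypervisor macro */

    /* Sketch only: after the fix, every operand of <<, >> and the bitwise
     * operators is unsigned, and bare 12U address shifts use CPU_PAGE_SHIFT. */
    static void example(uint64_t dest_mask, uint16_t bdf)
    {
        uint64_t pmsi_addr = dest_mask << CPU_PAGE_SHIFT;    /* was: dest_mask << 12U  */
        uint8_t bus = (uint8_t)((bdf >> 8U) & 0xFFU);        /* was: (bdf >> 8) & 0xff */

        (void)pmsi_addr;
        (void)bus;
    }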

v1 -> v2:
 * use the existing MACROs to get bus/slot/func values (see the sketch
   after this list)
 * update the PCI_SLOT MACRO to make it more straightforward
 * remove the incorrect replacement of 12U with CPU_PAGE_SHIFT in
   dmar_fault_msi_write
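
For context, a minimal sketch (not part of this commit) of how the
PCI_BUS/PCI_SLOT/PCI_FUNC macros decode a bus/device/function number. The
macro bodies are copied from the last hunk of this diff; the sample BDF
value is made up. The new PCI_SLOT form masks off the devfn byte first and
then shifts, which selects the same bits 7:3 as the old form:

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_BUS(bdf)  (((bdf) >> 8U) & 0xFFU)
    #define PCI_SLOT(bdf) (((bdf) & 0xFFU) >> 3U)   /* bits 7:3 of the devfn byte */
    #define PCI_FUNC(bdf) ((bdf) & 0x7U)

    int main(void)
    {
        uint16_t virt_bdf = 0x00A3U;   /* hypothetical BDF: bus 0x00, slot 0x14, func 0x3 */

        printf("PCI %x:%x.%x\n", PCI_BUS(virt_bdf), PCI_SLOT(virt_bdf), PCI_FUNC(virt_bdf));
        return 0;
    }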

Tracked-On: #861
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Author: Shiqing Gao
Date:   2018-11-06 08:41:54 +08:00 (committed by lijinxia)
Parent: 65a2613ab6
Commit: 366042cac2
10 changed files with 20 additions and 22 deletions


@@ -97,7 +97,7 @@ static void ptdev_build_physical_msi(struct acrn_vm *vm, struct ptdev_msi_info *
 /* update physical dest mode & dest field */
 info->pmsi_addr = info->vmsi_addr;
 info->pmsi_addr &= ~0xFF00CU;
-info->pmsi_addr |= (dest_mask << 12U) | MSI_ADDR_RH | MSI_ADDR_LOG;
+info->pmsi_addr |= (dest_mask << CPU_PAGE_SHIFT) | MSI_ADDR_RH | MSI_ADDR_LOG;
 dev_dbg(ACRN_DBG_IRQ, "MSI addr:data = 0x%llx:%x(V) -> 0x%llx:%x(P)",
 info->vmsi_addr, info->vmsi_data,
@@ -578,13 +578,9 @@ int ptdev_msix_remap(struct acrn_vm *vm, uint16_t virt_bdf,
 ptdev_build_physical_msi(vm, info, irq_to_vector(entry->allocated_pirq));
 entry->msi = *info;
-dev_dbg(ACRN_DBG_IRQ,
-"PCI %x:%x.%x MSI VR[%d] 0x%x->0x%x assigned to vm%d",
-(virt_bdf >> 8) & 0xFFU, (virt_bdf >> 3) & 0x1FU,
-(virt_bdf) & 0x7U, entry_nr,
-info->vmsi_data & 0xFFU,
-irq_to_vector(entry->allocated_pirq),
-entry->vm->vm_id);
+dev_dbg(ACRN_DBG_IRQ, "PCI %x:%x.%x MSI VR[%d] 0x%x->0x%x assigned to vm%d",
+PCI_BUS(virt_bdf), PCI_SLOT(virt_bdf), PCI_FUNC(virt_bdf), entry_nr,
+info->vmsi_data & 0xFFU, irq_to_vector(entry->allocated_pirq), entry->vm->vm_id);
 END:
 return 0;
 }


@@ -294,7 +294,7 @@ int gva2gpa(struct acrn_vcpu *vcpu, uint64_t gva, uint64_t *gpa,
 * So we use DPL of SS access rights field for guest DPL.
 */
 pw_info.is_user_mode_access =
-(((exec_vmread32(VMX_GUEST_SS_ATTR)>>5) & 0x3U) == 3U);
+(((exec_vmread32(VMX_GUEST_SS_ATTR) >> 5U) & 0x3U) == 3U);
 pw_info.pse = true;
 pw_info.nxe = ((vcpu_get_efer(vcpu) & MSR_IA32_EFER_NXE_BIT) != 0UL);
 pw_info.wp = ((vcpu_get_cr0(vcpu) & CR0_WP) != 0UL);


@@ -392,7 +392,7 @@ static void get_guest_paging_info(struct acrn_vcpu *vcpu, struct instr_emul_ctxt
 {
 uint8_t cpl;
-cpl = (uint8_t)((csar >> 5) & 3U);
+cpl = (uint8_t)((csar >> 5U) & 3U);
 emul_ctxt->paging.cr3 = exec_vmread(VMX_GUEST_CR3);
 emul_ctxt->paging.cpl = cpl;
 emul_ctxt->paging.cpu_mode = get_vcpu_mode(vcpu);


@@ -167,10 +167,12 @@ static void set_vcpu_mode(struct acrn_vcpu *vcpu, uint32_t cs_attr, uint64_t ia3
 uint64_t cr0)
 {
 if (ia32_efer & MSR_IA32_EFER_LMA_BIT) {
-if (cs_attr & 0x2000) /* CS.L = 1 */
+if (cs_attr & 0x2000U) {
+/* CS.L = 1 */
 vcpu->arch.cpu_mode = CPU_MODE_64BIT;
-else
+} else {
 vcpu->arch.cpu_mode = CPU_MODE_COMPATIBILITY;
+}
 } else if (cr0 & CR0_PE) {
 vcpu->arch.cpu_mode = CPU_MODE_PROTECTED;
 } else {


@@ -1037,7 +1037,7 @@ static int add_iommu_device(const struct iommu_domain *domain, uint16_t segment,
 lower = dmar_set_bitslice(lower,
 CTX_ENTRY_LOWER_SLPTPTR_MASK,
 CTX_ENTRY_LOWER_SLPTPTR_POS,
-domain->trans_table_ptr >> 12U);
+domain->trans_table_ptr >> CPU_PAGE_SHIFT);
 lower = dmar_set_bitslice(lower,
 CTX_ENTRY_LOWER_P_MASK,
 CTX_ENTRY_LOWER_P_POS,


@@ -198,8 +198,8 @@ handle_dmar_devscope(struct dmar_dev_scope *dev_scope,
 sizeof(struct acpi_dmar_pci_path);
 bdf = dmar_path_bdf(path_len, apci_devscope->bus, path);
-dev_scope->bus = (bdf >> 8) & 0xff;
-dev_scope->devfun = bdf & 0xff;
+dev_scope->bus = (bdf >> 8U) & 0xffU;
+dev_scope->devfun = bdf & 0xffU;
 return apci_devscope->length;
 }


@@ -176,7 +176,7 @@ static void update_trampoline_code_refs(uint64_t dest_pa)
 val = dest_pa + trampoline_relo_addr(&trampoline_fixup_target);
 ptr = hpa2hva(dest_pa + trampoline_relo_addr(&trampoline_fixup_cs));
-*(uint16_t *)(ptr) = (uint16_t)((val >> 4) & 0xFFFFU);
+*(uint16_t *)(ptr) = (uint16_t)((val >> 4U) & 0xFFFFU);
 ptr = hpa2hva(dest_pa + trampoline_relo_addr(&trampoline_fixup_ip));
 *(uint16_t *)(ptr) = (uint16_t)(val & 0xfU);


@@ -812,7 +812,7 @@ int32_t hcall_assign_ptdev(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 }
 ret = assign_iommu_device(target_vm->iommu,
-(uint8_t)(bdf >> 8), (uint8_t)(bdf & 0xffU));
+(uint8_t)(bdf >> 8U), (uint8_t)(bdf & 0xffU));
 return ret;
 }
@@ -843,7 +843,7 @@ int32_t hcall_deassign_ptdev(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 return -1;
 }
 ret = unassign_iommu_device(target_vm->iommu,
-(uint8_t)(bdf >> 8), (uint8_t)(bdf & 0xffU));
+(uint8_t)(bdf >> 8U), (uint8_t)(bdf & 0xffU));
 return ret;
 }


@@ -168,7 +168,7 @@ int general_sw_loader(struct acrn_vm *vm)
 /* add "cma=XXXXM@0xXXXXXXXX" to cmdline*/
 if (is_vm0(vm) && (e820_mem.max_ram_blk_size > 0)) {
 snprintf(dyn_bootargs, 100U, " cma=%dM@0x%llx",
-(e820_mem.max_ram_blk_size >> 20),
+(e820_mem.max_ram_blk_size >> 20U),
 e820_mem.max_ram_blk_base);
 (void)strcpy_s((char *)hva
 + sw_linux->bootargs_size,
@@ -183,10 +183,10 @@ int general_sw_loader(struct acrn_vm *vm)
 int32_t reserving_1g_pages;
 #ifdef CONFIG_REMAIN_1G_PAGES
-reserving_1g_pages = (e820_mem.total_mem_size >> 30) -
+reserving_1g_pages = (e820_mem.total_mem_size >> 30U) -
 CONFIG_REMAIN_1G_PAGES;
 #else
-reserving_1g_pages = (e820_mem.total_mem_size >> 30) -
+reserving_1g_pages = (e820_mem.total_mem_size >> 30U) -
 3;
 #endif
 if (reserving_1g_pages > 0) {


@@ -50,7 +50,7 @@
 #define PCI_REGMAX 0xFFU
 #define PCI_BUS(bdf) (((bdf) >> 8U) & 0xFFU)
-#define PCI_SLOT(bdf) (((bdf) >> 3U) & 0x1FU)
+#define PCI_SLOT(bdf) (((bdf) & 0xFFU) >> 3U)
 #define PCI_FUNC(bdf) ((bdf) & 0x7U)
 /* I/O ports */