hv: fix integer violations
The operands to shift operations (<<, >>) shall be unsigned integers.

v1 -> v2:
 * replace 12U with CPU_PAGE_SHIFT when it is an address shift case
 * replace 6UL with 0x6UL

Tracked-On: #861
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
commit 60d0a75243
parent 4d01e60eda
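To make the rule concrete, here is a minimal, self-contained C sketch of the two v1 -> v2 adjustments named above. It is an illustration only, not code from the hypervisor sources, and it assumes CPU_PAGE_SHIFT is 12U (the 4 KiB page shift used when extracting the MSI destination field).

#include <stdint.h>
#include <stdio.h>

#define CPU_PAGE_SHIFT 12U   /* unsigned shift count instead of a bare 12 */

int main(void)
{
	uint64_t vmsi_addr = 0xFEE0100CULL;   /* hypothetical MSI address */

	/* Address shift case: the shift count is the unsigned constant
	 * CPU_PAGE_SHIFT rather than the signed literal 12. */
	uint32_t dest = (uint32_t)(vmsi_addr >> CPU_PAGE_SHIFT) & 0xffU;

	/* EPT pointer attribute bits as rewritten in switch_world() and
	 * initialize_trusty(): every shift operand is unsigned and the
	 * memory-type constant is spelled 0x6UL. */
	uint64_t eptp_attr = (3UL << 3U) | 0x6UL;

	printf("dest=0x%x eptp_attr=0x%llx\n", dest, (unsigned long long)eptp_attr);
	return 0;
}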
@@ -76,7 +76,7 @@ static void ptdev_build_physical_msi(struct vm *vm, struct ptdev_msi_info *info,
 bool phys;

 /* get physical destination cpu mask */
-dest = (uint32_t)(info->vmsi_addr >> 12) & 0xffU;
+dest = (uint32_t)(info->vmsi_addr >> CPU_PAGE_SHIFT) & 0xffU;
 phys = ((info->vmsi_addr & MSI_ADDR_LOG) != MSI_ADDR_LOG);

 calcvdest(vm, &vdmask, dest, phys);
@@ -796,7 +796,7 @@ static void get_entry_info(const struct ptdev_remapping_info *entry, char *type,
 if (is_entry_active(entry)) {
 if (entry->intr_type == PTDEV_INTR_MSI) {
 (void)strcpy_s(type, 16U, "MSI");
-*dest = (entry->msi.pmsi_addr & 0xFF000U) >> 12;
+*dest = (entry->msi.pmsi_addr & 0xFF000U) >> CPU_PAGE_SHIFT;
 if ((entry->msi.pmsi_data & APIC_TRIGMOD_LEVEL) != 0U) {
 *lvl_tm = true;
 } else {
@@ -235,7 +235,7 @@ static int local_gva2gpa_pae(struct vcpu *vcpu, struct page_walk_info *pw_info,
 goto out;
 }

-index = (gva >> 30) & 0x3UL;
+index = (gva >> 30U) & 0x3UL;
 entry = base[index];

 if ((entry & PAGE_PRESENT) == 0U) {
@@ -51,11 +51,11 @@
 #define VIE_OP_TYPE_TEST 15U

 /* struct vie_op.op_flags */
-#define VIE_OP_F_IMM (1U << 0) /* 16/32-bit immediate operand */
-#define VIE_OP_F_IMM8 (1U << 1) /* 8-bit immediate operand */
-#define VIE_OP_F_MOFFSET (1U << 2) /* 16/32/64-bit immediate moffset */
-#define VIE_OP_F_NO_MODRM (1U << 3)
-#define VIE_OP_F_CHECK_GVA_DI (1U << 4) /* for movs, need to check DI */
+#define VIE_OP_F_IMM (1U << 0U) /* 16/32-bit immediate operand */
+#define VIE_OP_F_IMM8 (1U << 1U) /* 8-bit immediate operand */
+#define VIE_OP_F_MOFFSET (1U << 2U) /* 16/32/64-bit immediate moffset */
+#define VIE_OP_F_NO_MODRM (1U << 3U)
+#define VIE_OP_F_CHECK_GVA_DI (1U << 4U) /* for movs, need to check DI */

 static const struct instr_emul_vie_op two_byte_opcodes[256] = {
 [0xB6] = {
@@ -411,8 +411,8 @@ static int vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
 * The value of the bit 47 in the 'gla' should be replicated in the
 * most significant 16 bits.
 */
-mask = ~((1UL << 48) - 1);
-if ((gla & (1UL << 47)) != 0U) {
+mask = ~((1UL << 48U) - 1UL);
+if ((gla & (1UL << 47U)) != 0U) {
 return ((gla & mask) != mask) ? 1 : 0;
 } else {
 return ((gla & mask) != 0U) ? 1 : 0;
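The comment in the hunk above states the canonical-address rule: bit 47 must be replicated through the upper bits. A standalone sketch of that check, using the unsigned shift counts introduced by this commit, might look as follows. It is not the hypervisor implementation: it returns true for a canonical address, whereas the helper above returns a nonzero value on a violation, and it assumes an LP64 target where 1UL is 64 bits wide.

#include <stdbool.h>
#include <stdint.h>

/* true when bits 63:48 of 'gla' all replicate bit 47 */
static bool is_canonical_48(uint64_t gla)
{
	uint64_t mask = ~((1UL << 48U) - 1UL);   /* mask for bits 63:48 */

	if ((gla & (1UL << 47U)) != 0UL) {
		return (gla & mask) == mask;     /* upper bits must all be 1 */
	} else {
		return (gla & mask) == 0UL;      /* upper bits must all be 0 */
	}
}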
@@ -553,7 +553,7 @@ static uint8_t vie_read_bytereg(const struct vcpu *vcpu, const struct instr_emul
 * base register right by 8 bits (%ah = %rax >> 8).
 */
 if (lhbr != 0) {
-reg_val = (uint8_t)(val >> 8);
+reg_val = (uint8_t)(val >> 8U);
 } else {
 reg_val = (uint8_t)val;
 }
@@ -578,8 +578,8 @@ static void vie_write_bytereg(struct vcpu *vcpu, const struct instr_emul_vie *vi
 * Shift left by 8 to store 'byte' in a legacy high
 * byte register.
 */
-val <<= 8;
-mask <<= 8;
+val <<= 8U;
+mask <<= 8U;
 }
 val |= origval & ~mask;
 vm_set_register(vcpu, reg, val);
@@ -1852,9 +1852,9 @@ static int decode_modrm(struct instr_emul_vie *vie, enum vm_cpu_mode cpu_mode)
 return -1;
 }

-vie->mod = (x >> 6) & 0x3U;
-vie->rm = (x >> 0) & 0x7U;
-vie->reg = (x >> 3) & 0x7U;
+vie->mod = (x >> 6U) & 0x3U;
+vie->rm = (x >> 0U) & 0x7U;
+vie->reg = (x >> 3U) & 0x7U;

 /*
 * A direct addressing mode makes no sense in the context of an EPT
@@ -1880,10 +1880,10 @@ static int decode_modrm(struct instr_emul_vie *vie, enum vm_cpu_mode cpu_mode)
 * this case.
 */
 } else {
-vie->rm |= (vie->rex_b << 3);
+vie->rm |= (vie->rex_b << 3U);
 }

-vie->reg |= (vie->rex_r << 3);
+vie->reg |= (vie->rex_r << 3U);

 /* SIB */
 if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB) {
@@ -1940,13 +1940,13 @@ static int decode_sib(struct instr_emul_vie *vie)
 }

 /* De-construct the SIB byte */
-vie->ss = (x >> 6) & 0x3U;
-vie->index = (x >> 3) & 0x7U;
-vie->base = (x >> 0) & 0x7U;
+vie->ss = (x >> 6U) & 0x3U;
+vie->index = (x >> 3U) & 0x7U;
+vie->base = (x >> 0U) & 0x7U;

 /* Apply the REX prefix modifiers */
-vie->index |= vie->rex_x << 3;
-vie->base |= vie->rex_b << 3;
+vie->index |= vie->rex_x << 3U;
+vie->base |= vie->rex_b << 3U;

 switch (vie->mod) {
 case VIE_MOD_INDIRECT_DISP8:
@@ -1000,7 +1000,7 @@ vlapic_calcdest(struct vm *vm, uint64_t *dmask, uint32_t dest,
 * In the "Cluster Model" the MDA is used to identify a
 * specific cluster and a set of APICs in that cluster.
 */
-mda_cluster_id = (dest >> 4) & 0xfU;
+mda_cluster_id = (dest >> 4U) & 0xfU;
 mda_cluster_ldest = dest & 0xfU;

 /*
@@ -1017,13 +1017,13 @@ vlapic_calcdest(struct vm *vm, uint64_t *dmask, uint32_t dest,

 if ((dfr & APIC_DFR_MODEL_MASK) ==
 APIC_DFR_MODEL_FLAT) {
-ldest = ldr >> 24;
+ldest = ldr >> 24U;
 mda_ldest = mda_flat_ldest;
 } else if ((dfr & APIC_DFR_MODEL_MASK) ==
 APIC_DFR_MODEL_CLUSTER) {

-cluster = ldr >> 28;
-ldest = (ldr >> 24) & 0xfU;
+cluster = ldr >> 28U;
+ldest = (ldr >> 24U) & 0xfU;

 if (cluster != mda_cluster_id) {
 continue;
@@ -1405,7 +1405,7 @@ vlapic_read(struct acrn_vlapic *vlapic, uint32_t offset_arg,
 case APIC_OFFSET_ISR5:
 case APIC_OFFSET_ISR6:
 case APIC_OFFSET_ISR7:
-i = (offset - APIC_OFFSET_ISR0) >> 4;
+i = (offset - APIC_OFFSET_ISR0) >> 4U;
 *data = lapic->isr[i].v;
 break;
 case APIC_OFFSET_TMR0:
@@ -1416,7 +1416,7 @@ vlapic_read(struct acrn_vlapic *vlapic, uint32_t offset_arg,
 case APIC_OFFSET_TMR5:
 case APIC_OFFSET_TMR6:
 case APIC_OFFSET_TMR7:
-i = (offset - APIC_OFFSET_TMR0) >> 4;
+i = (offset - APIC_OFFSET_TMR0) >> 4U;
 *data = lapic->tmr[i].v;
 break;
 case APIC_OFFSET_IRR0:
@@ -1427,7 +1427,7 @@ vlapic_read(struct acrn_vlapic *vlapic, uint32_t offset_arg,
 case APIC_OFFSET_IRR5:
 case APIC_OFFSET_IRR6:
 case APIC_OFFSET_IRR7:
-i = (offset - APIC_OFFSET_IRR0) >> 4;
+i = (offset - APIC_OFFSET_IRR0) >> 4U;
 *data = lapic->irr[i].v;
 break;
 case APIC_OFFSET_ESR:
@@ -2155,7 +2155,7 @@ vlapic_apicv_inject_pir(struct acrn_vlapic *vlapic)
 val = atomic_readandclear64(&pir_desc->pir[i]);
 if (val != 0UL) {
 irr[i * 2U].v |= (uint32_t)val;
-irr[(i * 2U) + 1U].v |= (uint32_t)(val >> 32);
+irr[(i * 2U) + 1U].v |= (uint32_t)(val >> 32U);

 pirbase = 64U*i;
 pirval = val;
@@ -112,7 +112,7 @@ int check_vmx_mmu_cap(void)
 /* Read the MSR register of EPT and VPID Capability - SDM A.10 */
 val = msr_read(MSR_IA32_VMX_EPT_VPID_CAP);
 vmx_caps.ept = (uint32_t) val;
-vmx_caps.vpid = (uint32_t) (val >> 32);
+vmx_caps.vpid = (uint32_t) (val >> 32U);

 if (!cpu_has_vmx_ept_cap(VMX_EPT_INVEPT)) {
 pr_fatal("%s, invept not supported\n", __func__);
@@ -310,11 +310,11 @@ void switch_world(struct vcpu *vcpu, int next_world)
 if (next_world == NORMAL_WORLD) {
 exec_vmwrite64(VMX_EPT_POINTER_FULL,
 hva2hpa(vcpu->vm->arch_vm.nworld_eptp) |
-(3UL << 3) | 6UL);
+(3UL << 3U) | 0x6UL);
 } else {
 exec_vmwrite64(VMX_EPT_POINTER_FULL,
 hva2hpa(vcpu->vm->arch_vm.sworld_eptp) |
-(3UL << 3) | 6UL);
+(3UL << 3U) | 0x6UL);
 }

 /* Update world index */
@@ -412,9 +412,9 @@ bool initialize_trusty(struct vcpu *vcpu, uint64_t param)
 switch (boot_param.version) {
 case TRUSTY_VERSION_2:
 trusty_entry_gpa = ((uint64_t)boot_param.entry_point) |
-(((uint64_t)boot_param.entry_point_high) << 32);
+(((uint64_t)boot_param.entry_point_high) << 32U);
 trusty_base_gpa = ((uint64_t)boot_param.base_addr) |
-(((uint64_t)boot_param.base_addr_high) << 32);
+(((uint64_t)boot_param.base_addr_high) << 32U);

 /* copy rpmb_key from OSloader */
 (void)memcpy_s(&g_key_info.rpmb_key[0][0], 64U,
@@ -438,7 +438,7 @@ bool initialize_trusty(struct vcpu *vcpu, uint64_t param)
 trusty_base_hpa = vm->sworld_control.sworld_memory.base_hpa;

 exec_vmwrite64(VMX_EPT_POINTER_FULL,
-hva2hpa(vm->arch_vm.sworld_eptp) | (3UL << 3) | 6UL);
+hva2hpa(vm->arch_vm.sworld_eptp) | (3UL << 3U) | 0x6UL);

 /* save Normal World context */
 save_world_ctx(vcpu, &vcpu->arch_vcpu.contexts[NORMAL_WORLD].ext_ctx);
@@ -364,7 +364,7 @@ int external_interrupt_vmexit_handler(struct vcpu *vcpu)

 intr_info = exec_vmread32(VMX_EXIT_INT_INFO);
 if (((intr_info & VMX_INT_INFO_VALID) == 0U) ||
-(((intr_info & VMX_INT_TYPE_MASK) >> 8)
+(((intr_info & VMX_INT_TYPE_MASK) >> 8U)
 != VMX_INT_TYPE_EXT_INT)) {
 pr_err("Invalid VM exit interrupt info:%x", intr_info);
 vcpu_retain_rip(vcpu);
@@ -735,7 +735,7 @@ static uint32_t check_vmx_ctrl(uint32_t msr, uint32_t ctrl_req)

 vmx_msr = msr_read(msr);
 vmx_msr_low = (uint32_t)vmx_msr;
-vmx_msr_high = (uint32_t)(vmx_msr >> 32);
+vmx_msr_high = (uint32_t)(vmx_msr >> 32U);
 pr_dbg("VMX_PIN_VM_EXEC_CONTROLS:low=0x%x, high=0x%x\n",
 vmx_msr_low, vmx_msr_high);

@@ -71,12 +71,12 @@ dmar_set_bitslice(uint64_t var, uint64_t mask,
 #define DMAR_MSI_DELIVERY_LOWPRI (1U << DMAR_MSI_DELIVERY_MODE_SHIFT)

 /* Fault event MSI address register */
-#define DMAR_MSI_DEST_MODE_SHIFT (2)
-#define DMAR_MSI_DEST_MODE_PHYS (0 << DMAR_MSI_DEST_MODE_SHIFT)
-#define DMAR_MSI_DEST_MODE_LOGIC (1 << DMAR_MSI_DEST_MODE_SHIFT)
-#define DMAR_MSI_REDIRECTION_SHIFT (3)
-#define DMAR_MSI_REDIRECTION_CPU (0 << DMAR_MSI_REDIRECTION_SHIFT)
-#define DMAR_MSI_REDIRECTION_LOWPRI (1 << DMAR_MSI_REDIRECTION_SHIFT)
+#define DMAR_MSI_DEST_MODE_SHIFT (2U)
+#define DMAR_MSI_DEST_MODE_PHYS (0U << DMAR_MSI_DEST_MODE_SHIFT)
+#define DMAR_MSI_DEST_MODE_LOGIC (1U << DMAR_MSI_DEST_MODE_SHIFT)
+#define DMAR_MSI_REDIRECTION_SHIFT (3U)
+#define DMAR_MSI_REDIRECTION_CPU (0U << DMAR_MSI_REDIRECTION_SHIFT)
+#define DMAR_MSI_REDIRECTION_LOWPRI (1U << DMAR_MSI_REDIRECTION_SHIFT)

 enum dmar_cirg_type {
 DMAR_CIRG_RESERVED = 0,
@@ -748,8 +748,8 @@ static void fault_record_analysis(__unused uint64_t low, uint64_t high)
 pr_info("%s, Reason: 0x%x, SID: %x.%x.%x @0x%llx",
 (dma_frcd_up_t(high) != 0U) ? "Read/Atomic" : "Write",
 dma_frcd_up_fr(high),
-dma_frcd_up_sid(high) >> 8,
-(dma_frcd_up_sid(high) >> 3) & 0x1fUL,
+dma_frcd_up_sid(high) >> 8U,
+(dma_frcd_up_sid(high) >> 3U) & 0x1fUL,
 dma_frcd_up_sid(high) & 0x7UL,
 low);
 #if DBG_IOMMU
@@ -930,13 +930,13 @@ static int add_iommu_device(const struct iommu_domain *domain, uint16_t segment,
 dmar_uint = device_to_dmaru(segment, bus, devfun);
 if (dmar_uint == NULL) {
 pr_err("no dmar unit found for device:0x%x:%x.%x",
-bus, devfun >> 3, devfun & 0x7U);
+bus, devfun >> 3U, devfun & 0x7U);
 return 1;
 }

 if (dmar_uint->drhd->ignore) {
 dev_dbg(ACRN_DBG_IOMMU, "device is ignored :0x%x:%x.%x",
-bus, devfun >> 3, devfun & 0x7U);
+bus, devfun >> 3U, devfun & 0x7U);
 return 0;
 }

@@ -992,7 +992,7 @@ static int add_iommu_device(const struct iommu_domain *domain, uint16_t segment,
 pr_err("%s: context entry@0x%llx (Lower:%x) ",
 __func__, context_entry, context_entry->lower);
 pr_err("already present for %x:%x.%x",
-bus, devfun >> 3, devfun & 0x7U);
+bus, devfun >> 3U, devfun & 0x7U);
 return 1;
 }

@@ -417,7 +417,7 @@ static void vpic_set_pinstate(struct acrn_vpic *vpic, uint8_t pin,
 i8259->pin_state[pin & 0x7U] = 0U;
 }

-lvl_trigger = ((vpic->i8259[pin >> 3U].elc & (1U << (pin & 0x7U))) != 0);
+lvl_trigger = ((vpic->i8259[pin >> 3U].elc & (1U << (pin & 0x7U))) != 0U);

 if (((old_lvl == 0U) && (level == 1U)) ||
 ((level == 1U) && (lvl_trigger == true))) {
@@ -552,7 +552,7 @@ static void vpic_pin_accepted(struct i8259_reg_state *i8259, uint8_t pin)
 {
 i8259->intr_raised = false;

-if ((i8259->elc & (1U << pin)) == 0) {
+if ((i8259->elc & (1U << pin)) == 0U) {
 /*only used edge trigger mode*/
 i8259->request &= ~(uint8_t)(1U << pin);
 }
@@ -114,7 +114,7 @@ struct vm_arch {
 } __aligned(CPU_PAGE_SIZE);


-#define CPUID_CHECK_SUBLEAF (1U << 0)
+#define CPUID_CHECK_SUBLEAF (1U << 0U)
 #define MAX_VM_VCPUID_ENTRIES 64U
 struct vcpuid_entry {
 uint32_t eax;
@@ -64,6 +64,6 @@ static inline uint64_t rdtsc(void)
 uint32_t lo, hi;

 asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
-return ((uint64_t)hi << 32) | lo;
+return ((uint64_t)hi << 32U) | lo;
 }
 #endif /* RTL_H */
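Several hunks above (check_vmx_mmu_cap(), check_vmx_ctrl(), initialize_trusty(), rdtsc()) split a 64-bit value into 32-bit halves or recombine them, and all now use 32U as the shift count. A minimal sketch of that pattern, independent of the hypervisor sources and using a made-up value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 0x00000012aabbccddULL;   /* hypothetical 64-bit MSR value */

	/* split into low/high 32-bit halves; the shift count is unsigned */
	uint32_t lo = (uint32_t)val;
	uint32_t hi = (uint32_t)(val >> 32U);

	/* recombine, as in the rdtsc() hunk above */
	uint64_t combined = ((uint64_t)hi << 32U) | lo;

	printf("lo=0x%x hi=0x%x combined=0x%llx\n",
	       lo, hi, (unsigned long long)combined);
	return 0;
}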