HV:guest:fix "signed/unsigned conversion without cast"

MISRA C requires that conversions between signed and unsigned types be made explicit with a cast.

V1->V2:
  a. split the patch into a patch series

V2->V3:
  a. change the suffix of uint64_t numeric constants from U to UL

Signed-off-by: Huihuang Shi <huihuang.shi@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Authored by Huihuang Shi on 2018-07-03 18:34:25 +08:00, committed by lijinxia
commit 8b94957774 (parent a27f33f9bc)
11 changed files with 150 additions and 144 deletions

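Before the per-file hunks, a minimal before/after sketch of the conversion pattern this series applies; illustrative code only, not lines from the patch. The V2->V3 note above concerns literal width: on the LP64 targets the hypervisor builds for, a U suffix gives a 32-bit unsigned constant while UL gives a 64-bit one.

#include <stdint.h>

/* before: implicit signed/unsigned mixing, flagged by the checker */
static uint32_t shift_bad(int i, uint32_t width)
{
	return i * width + 12;                /* int mixed with uint32_t */
}

/* after: explicit cast plus a matching literal suffix */
static uint32_t shift_good(int i, uint32_t width)
{
	return ((uint32_t)i * width) + 12U;
}

/* uint64_t-wide constants take UL (64-bit on LP64), per V2->V3 */
static uint64_t index_mask(uint32_t width)
{
	return (1UL << width) - 1UL;
}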
@@ -63,7 +63,7 @@ static inline int set_vcpuid_entry(struct vm *vm,
size_t entry_size = sizeof(struct vcpuid_entry);
if (vm->vcpuid_entry_nr == MAX_VM_VCPUID_ENTRIES) {
pr_err("%s, vcpuid entry over MAX_VM_VCPUID_ENTRIES(%d)\n",
pr_err("%s, vcpuid entry over MAX_VM_VCPUID_ENTRIES(%u)\n",
__func__, MAX_VM_VCPUID_ENTRIES);
return -ENOMEM;
}

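Note on the hunk above: once MAX_VM_VCPUID_ENTRIES is defined as 64U the argument is unsigned int, so %u is the matching printf-style conversion. A standalone illustration, with printf standing in for the hypervisor's pr_err():

#include <stdio.h>

#define MAX_VM_VCPUID_ENTRIES 64U   /* unsigned after this series */

int main(void)
{
	/* %u matches the unsigned int argument; %d would not */
	printf("%s, vcpuid entry over MAX_VM_VCPUID_ENTRIES(%u)\n",
	       __func__, MAX_VM_VCPUID_ENTRIES);
	return 0;
}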
@@ -127,8 +127,8 @@ enum vm_paging_mode get_vcpu_paging_mode(struct vcpu *vcpu)
static int _gva2gpa_common(struct vcpu *vcpu, struct page_walk_info *pw_info,
uint64_t gva, uint64_t *gpa, uint32_t *err_code)
{
int i, index;
uint32_t shift;
int i;
uint32_t index, shift;
uint8_t *base;
uint64_t entry;
uint64_t addr, page_size;
@@ -147,15 +147,15 @@ static int _gva2gpa_common(struct vcpu *vcpu, struct page_walk_info *pw_info,
goto out;
}
shift = i * pw_info->width + 12;
index = (gva >> shift) & ((1UL << pw_info->width) - 1);
shift = (uint32_t) i * pw_info->width + 12U;
index = (gva >> shift) & ((1UL << pw_info->width) - 1UL);
page_size = 1UL << shift;
if (pw_info->width == 10)
if (pw_info->width == 10U)
/* 32bit entry */
entry = *((uint32_t *)(base + 4 * index));
entry = *((uint32_t *)(base + 4U * index));
else
entry = *((uint64_t *)(base + 8 * index));
entry = *((uint64_t *)(base + 8U * index));
/* check if the entry present */
if ((entry & MMU_32BIT_PDE_P) == 0U) {
@@ -259,7 +259,7 @@ int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa,
if ((gpa == NULL) || (err_code == NULL))
return -EINVAL;
*gpa = 0;
*gpa = 0UL;
pw_info.top_entry = cur_context->cr3;
pw_info.level = pm;
@@ -302,12 +302,12 @@ static inline int32_t _copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
void *g_ptr;
hpa = _gpa2hpa(vm, gpa, &pg_size);
if (pg_size == 0) {
if (pg_size == 0U) {
pr_err("GPA2HPA not found");
return -EINVAL;
}
if (fix_pg_size != 0)
if (fix_pg_size != 0U)
pg_size = fix_pg_size;
off_in_pg = gpa & (pg_size - 1);
@@ -327,7 +327,8 @@ static inline int32_t _copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
static inline int copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
uint32_t size, bool cp_from_vm)
{
int32_t len;
int32_t ret;
uint32_t len;
if (vm == NULL) {
pr_err("guest phy addr copy need vm param");
@@ -335,14 +335,15 @@ static inline int copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
}
do {
len = _copy_gpa(vm, h_ptr, gpa, size, 0, cp_from_vm);
if (len < 0)
return len;
ret = _copy_gpa(vm, h_ptr, gpa, size, 0, cp_from_vm);
if (ret < 0)
return ret;
len = (uint32_t) ret;
gpa += len;
h_ptr += len;
size -= len;
} while (size > 0);
} while (size > 0U);
return 0;
}
@@ -351,7 +353,8 @@ static inline int copy_gva(struct vcpu *vcpu, void *h_ptr, uint64_t gva,
uint32_t size, uint32_t *err_code, bool cp_from_vm)
{
uint64_t gpa = 0;
int32_t len, ret;
int32_t ret;
uint32_t len;
if (vcpu == NULL) {
pr_err("guest virt addr copy need vcpu param");
@@ -370,15 +373,16 @@ static inline int copy_gva(struct vcpu *vcpu, void *h_ptr, uint64_t gva,
return ret;
}
len = ret = _copy_gpa(vcpu->vm, h_ptr, gpa, size,
ret = _copy_gpa(vcpu->vm, h_ptr, gpa, size,
PAGE_SIZE_4K, cp_from_vm);
if (ret < 0)
return ret;
len = (uint32_t) ret;
gva += len;
h_ptr += len;
size -= len;
} while (size > 0);
} while (size > 0U);
return 0;
}
@@ -413,7 +417,7 @@ int copy_to_gva(struct vcpu *vcpu, void *h_ptr, uint64_t gva,
void init_e820(void)
{
unsigned int i;
uint32_t i;
if (boot_regs[0] == MULTIBOOT_INFO_MAGIC) {
struct multiboot_info *mbi = (struct multiboot_info *)
@@ -435,7 +439,7 @@ void init_e820(void)
"mmap length 0x%x addr 0x%x entries %d\n",
mbi->mi_mmap_length, mbi->mi_mmap_addr,
e820_entries);
for (i = 0; i < e820_entries; i++) {
for (i = 0U; i < e820_entries; i++) {
e820[i].baseaddr = mmap[i].baseaddr;
e820[i].length = mmap[i].length;
e820[i].type = mmap[i].type;
@@ -455,16 +459,16 @@ void init_e820(void)
void obtain_e820_mem_info(void)
{
unsigned int i;
uint32_t i;
struct e820_entry *entry;
e820_mem.mem_bottom = UINT64_MAX;
e820_mem.mem_top = 0x00;
e820_mem.total_mem_size = 0;
e820_mem.max_ram_blk_base = 0;
e820_mem.max_ram_blk_size = 0;
e820_mem.mem_top = 0x0UL;
e820_mem.total_mem_size = 0UL;
e820_mem.max_ram_blk_base = 0UL;
e820_mem.max_ram_blk_size = 0UL;
for (i = 0; i < e820_entries; i++) {
for (i = 0U; i < e820_entries; i++) {
entry = &e820[i];
if (e820_mem.mem_bottom > entry->baseaddr)
e820_mem.mem_bottom = entry->baseaddr;
@@ -488,7 +492,7 @@ void obtain_e820_mem_info(void)
static void rebuild_vm0_e820(void)
{
unsigned int i;
uint32_t i;
uint64_t entry_start;
uint64_t entry_end;
uint64_t hv_start = CONFIG_RAM_START;
@@ -498,7 +502,7 @@ static void rebuild_vm0_e820(void)
/* hypervisor mem need be filter out from e820 table
* it's hv itself + other hv reserved mem like vgt etc
*/
for (i = 0; i < e820_entries; i++) {
for (i = 0U; i < e820_entries; i++) {
entry = &e820[i];
entry_start = entry->baseaddr;
entry_end = entry->baseaddr + entry->length;
@@ -539,7 +543,7 @@ static void rebuild_vm0_e820(void)
}
if (new_entry.length > 0) {
if (new_entry.length > 0UL) {
e820_entries++;
ASSERT(e820_entries <= E820_MAX_ENTRIES,
"e820 entry overflow");
@@ -562,7 +566,7 @@ static void rebuild_vm0_e820(void)
*/
int prepare_vm0_memmap_and_e820(struct vm *vm)
{
unsigned int i;
uint32_t i;
uint32_t attr_wb = (IA32E_EPT_R_BIT |
IA32E_EPT_W_BIT |
IA32E_EPT_X_BIT |
@@ -584,7 +588,7 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
MAP_MMIO, attr_uc);
/* update ram entries to WB attr */
for (i = 0; i < e820_entries; i++) {
for (i = 0U; i < e820_entries; i++) {
entry = &e820[i];
if (entry->type == E820_TYPE_RAM)
ept_mmap(vm, entry->baseaddr, entry->baseaddr,
@@ -593,7 +597,7 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
dev_dbg(ACRN_DBG_GUEST, "VM0 e820 layout:\n");
for (i = 0; i < e820_entries; i++) {
for (i = 0U; i < e820_entries; i++) {
entry = &e820[i];
dev_dbg(ACRN_DBG_GUEST,
"e820 table: %d type: 0x%x", i, entry->type);
@@ -618,7 +622,7 @@ uint64_t e820_alloc_low_memory(uint32_t size)
/* We want memory in page boundary and integral multiple of pages */
size = ROUND_PAGE_UP(size);
for (i = 0; i < e820_entries; i++) {
for (i = 0U; i < e820_entries; i++) {
entry = &e820[i];
uint64_t start, end, length;

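The recurring change in the hunks above splits the old signed len into a signed status value and an unsigned byte count. A self-contained sketch of the idiom, with copy_chunk() as a hypothetical stand-in for _copy_gpa():

#include <stdint.h>
#include <string.h>

/* copy_chunk(): hypothetical stand-in for _copy_gpa(); copies up to
 * 16 bytes and returns the count, or a negative error code */
static int32_t copy_chunk(uint8_t *dst, const uint8_t *src, uint32_t size)
{
	uint32_t n = (size < 16U) ? size : 16U;

	(void)memcpy(dst, src, n);
	return (int32_t)n;
}

/* the idiom adopted in copy_gpa()/copy_gva(): a signed return for
 * errors, a separate unsigned length for the pointer arithmetic */
static int copy_all(uint8_t *dst, const uint8_t *src, uint32_t size)
{
	int32_t ret;
	uint32_t len;

	do {
		ret = copy_chunk(dst, src, size);
		if (ret < 0)
			return ret;
		len = (uint32_t)ret;   /* explicit, visible conversion */
		dst += len;
		src += len;
		size -= len;
	} while (size > 0U);

	return 0;
}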
@@ -83,133 +83,133 @@ static const struct vie_op two_byte_opcodes[256] = {
static const struct vie_op one_byte_opcodes[256] = {
[0x0F] = {
.op_byte = 0x0F,
.op_byte = 0x0FU,
.op_type = VIE_OP_TYPE_TWO_BYTE
},
[0x2B] = {
.op_byte = 0x2B,
.op_byte = 0x2BU,
.op_type = VIE_OP_TYPE_SUB,
},
[0x39] = {
.op_byte = 0x39,
.op_byte = 0x39U,
.op_type = VIE_OP_TYPE_CMP,
},
[0x3B] = {
.op_byte = 0x3B,
.op_byte = 0x3BU,
.op_type = VIE_OP_TYPE_CMP,
},
[0x88] = {
.op_byte = 0x88,
.op_byte = 0x88U,
.op_type = VIE_OP_TYPE_MOV,
},
[0x89] = {
.op_byte = 0x89,
.op_byte = 0x89U,
.op_type = VIE_OP_TYPE_MOV,
},
[0x8A] = {
.op_byte = 0x8A,
.op_byte = 0x8AU,
.op_type = VIE_OP_TYPE_MOV,
},
[0x8B] = {
.op_byte = 0x8B,
.op_byte = 0x8BU,
.op_type = VIE_OP_TYPE_MOV,
},
[0xA1] = {
.op_byte = 0xA1,
.op_byte = 0xA1U,
.op_type = VIE_OP_TYPE_MOV,
.op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM,
},
[0xA3] = {
.op_byte = 0xA3,
.op_byte = 0xA3U,
.op_type = VIE_OP_TYPE_MOV,
.op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM,
},
[0xA4] = {
.op_byte = 0xA4,
.op_byte = 0xA4U,
.op_type = VIE_OP_TYPE_MOVS,
.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
},
[0xA5] = {
.op_byte = 0xA5,
.op_byte = 0xA5U,
.op_type = VIE_OP_TYPE_MOVS,
.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
},
[0xAA] = {
.op_byte = 0xAA,
.op_byte = 0xAAU,
.op_type = VIE_OP_TYPE_STOS,
.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
},
[0xAB] = {
.op_byte = 0xAB,
.op_byte = 0xABU,
.op_type = VIE_OP_TYPE_STOS,
.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
},
[0xC6] = {
/* XXX Group 11 extended opcode - not just MOV */
.op_byte = 0xC6,
.op_byte = 0xC6U,
.op_type = VIE_OP_TYPE_MOV,
.op_flags = VIE_OP_F_IMM8,
},
[0xC7] = {
.op_byte = 0xC7,
.op_byte = 0xC7U,
.op_type = VIE_OP_TYPE_MOV,
.op_flags = VIE_OP_F_IMM,
},
[0x23] = {
.op_byte = 0x23,
.op_byte = 0x23U,
.op_type = VIE_OP_TYPE_AND,
},
[0x80] = {
/* Group 1 extended opcode */
.op_byte = 0x80,
.op_byte = 0x80U,
.op_type = VIE_OP_TYPE_GROUP1,
.op_flags = VIE_OP_F_IMM8,
},
[0x81] = {
/* Group 1 extended opcode */
.op_byte = 0x81,
.op_byte = 0x81U,
.op_type = VIE_OP_TYPE_GROUP1,
.op_flags = VIE_OP_F_IMM,
},
[0x83] = {
/* Group 1 extended opcode */
.op_byte = 0x83,
.op_byte = 0x83U,
.op_type = VIE_OP_TYPE_GROUP1,
.op_flags = VIE_OP_F_IMM8,
},
[0x84] = {
.op_byte = 0x84,
.op_byte = 0x84U,
.op_type = VIE_OP_TYPE_TEST,
},
[0x85] = {
.op_byte = 0x85,
.op_byte = 0x85U,
.op_type = VIE_OP_TYPE_TEST,
},
[0x08] = {
.op_byte = 0x08,
.op_byte = 0x08U,
.op_type = VIE_OP_TYPE_OR,
},
[0x09] = {
.op_byte = 0x09,
.op_byte = 0x09U,
.op_type = VIE_OP_TYPE_OR,
},
[0x8F] = {
/* XXX Group 1A extended opcode - not just POP */
.op_byte = 0x8F,
.op_byte = 0x8FU,
.op_type = VIE_OP_TYPE_POP,
},
[0xFF] = {
/* XXX Group 5 extended opcode - not just PUSH */
.op_byte = 0xFF,
.op_byte = 0xFFU,
.op_type = VIE_OP_TYPE_PUSH,
}
};
/* struct vie.mod */
#define VIE_MOD_INDIRECT 0
#define VIE_MOD_INDIRECT_DISP8 1
#define VIE_MOD_INDIRECT_DISP32 2
#define VIE_MOD_DIRECT 3
#define VIE_MOD_INDIRECT 0U
#define VIE_MOD_INDIRECT_DISP8 1U
#define VIE_MOD_INDIRECT_DISP32 2U
#define VIE_MOD_DIRECT 3U
/* struct vie.rm */
#define VIE_RM_SIB 4
@@ -405,7 +405,7 @@ emulate_mov(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
size = vie->opsize;
error = -EINVAL;
switch (vie->op.op_byte) {
case 0x88:
case 0x88U:
/*
* MOV byte from reg (ModRM:reg) to mem (ModRM:r/m)
* 88/r: mov r/m8, r8
@@ -417,7 +417,7 @@ emulate_mov(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
error = memwrite(vcpu, gpa, byte, size,
arg);
break;
case 0x89:
case 0x89U:
/*
* MOV from reg (ModRM:reg) to mem (ModRM:r/m)
* 89/r: mov r/m16, r16
@@ -433,7 +433,7 @@ emulate_mov(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
arg);
}
break;
case 0x8A:
case 0x8AU:
/*
* MOV byte from mem (ModRM:r/m) to reg (ModRM:reg)
* 8A/r: mov r8, r/m8
@@ -444,7 +444,7 @@ emulate_mov(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
if (error == 0)
error = vie_write_bytereg(vcpu, vie, val);
break;
case 0x8B:
case 0x8BU:
/*
* MOV from mem (ModRM:r/m) to reg (ModRM:reg)
* 8B/r: mov r16, r/m16
@@ -458,7 +458,7 @@ emulate_mov(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
val, size);
}
break;
case 0xA1:
case 0xA1U:
/*
* MOV from seg:moffset to AX/EAX/RAX
* A1: mov AX, moffs16
@@ -472,7 +472,7 @@ emulate_mov(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
val, size);
}
break;
case 0xA3:
case 0xA3U:
/*
* MOV from AX/EAX/RAX to seg:moffset
* A3: mov moffs16, AX
@@ -487,7 +487,7 @@ emulate_mov(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
arg);
}
break;
case 0xC6:
case 0xC6U:
/*
* MOV from imm8 to mem (ModRM:r/m)
* C6/0 mov r/m8, imm8
@@ -497,7 +497,7 @@ emulate_mov(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
error = memwrite(vcpu, gpa, vie->immediate, size,
arg);
break;
case 0xC7:
case 0xC7U:
/*
* MOV from imm16/imm32 to mem (ModRM:r/m)
* C7/0 mov r/m16, imm16
@@ -528,7 +528,7 @@ emulate_movx(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
error = -EINVAL;
switch (vie->op.op_byte) {
case 0xB6:
case 0xB6U:
/*
* MOV and zero extend byte from mem (ModRM:r/m) to
* reg (ModRM:reg).
@@ -552,7 +552,7 @@ emulate_movx(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
/* write the result */
error = vie_update_register(vcpu, reg, val, size);
break;
case 0xB7:
case 0xB7U:
/*
* MOV and zero extend word from mem (ModRM:r/m) to
* reg (ModRM:reg).
@@ -571,7 +571,7 @@ emulate_movx(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
error = vie_update_register(vcpu, reg, val, size);
break;
case 0xBE:
case 0xBEU:
/*
* MOV and sign extend byte from mem (ModRM:r/m) to
* reg (ModRM:reg).
@@ -671,7 +671,7 @@ emulate_movs(struct vcpu *vcpu, __unused uint64_t gpa, struct vie *vie,
uint64_t rcx, rdi, rsi, rflags;
int error, fault, opsize, seg, repeat;
opsize = (vie->op.op_byte == 0xA4) ? 1 : vie->opsize;
opsize = (vie->op.op_byte == 0xA4U) ? 1 : vie->opsize;
error = 0;
/*
@@ -829,13 +829,13 @@ emulate_test(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
error = -EINVAL;
switch (vie->op.op_byte) {
case 0x84:
case 0x84U:
/*
* 84/r test r8, r/m8
*/
size = 1; /*override size for 8-bit operation*/
/* fallthrough */
case 0x85:
case 0x85U:
/*
* AND reg (ModRM:reg) and mem (ModRM:r/m) and discard
* the result.
@@ -897,7 +897,7 @@ emulate_and(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
error = -EINVAL;
switch (vie->op.op_byte) {
case 0x23:
case 0x23U:
/*
* AND reg (ModRM:reg) and mem (ModRM:r/m) and store the
* result in reg.
@@ -923,8 +923,8 @@ emulate_and(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
error = vie_update_register(vcpu, reg, result,
size);
break;
case 0x81:
case 0x83:
case 0x81U:
case 0x83U:
/*
* AND mem (ModRM:r/m) with immediate and store the
* result in mem.
@@ -987,8 +987,8 @@ emulate_or(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
error = -EINVAL;
switch (vie->op.op_byte) {
case 0x81:
case 0x83:
case 0x81U:
case 0x83U:
/*
* OR mem (ModRM:r/m) with immediate and store the
* result in mem.
@@ -1018,7 +1018,7 @@ emulate_or(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
result = val1 | vie->immediate;
error = memwrite(vcpu, gpa, result, size, arg);
break;
case 0x09:
case 0x09U:
/*
* OR mem (ModRM:r/m) with reg (ModRM:reg) and store the
* result in mem.
@@ -1078,8 +1078,8 @@ emulate_cmp(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
size = vie->opsize;
switch (vie->op.op_byte) {
case 0x39:
case 0x3B:
case 0x39U:
case 0x3BU:
/*
* 39/r CMP r/m16, r16
* 39/r CMP r/m32, r32
@@ -1115,9 +1115,9 @@ emulate_cmp(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
}
rflags2 = getcc(size, op1, op2);
break;
case 0x80:
case 0x81:
case 0x83:
case 0x80U:
case 0x81U:
case 0x83U:
/*
* 80 /7 cmp r/m8, imm8
* REX + 80 /7 cmp r/m8, imm8
@@ -1177,7 +1177,7 @@ emulate_sub(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
error = -EINVAL;
switch (vie->op.op_byte) {
case 0x2B:
case 0x2BU:
/*
* SUB r/m from r and store the result in r
*
@@ -1230,11 +1230,11 @@ emulate_stack_op(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie,
struct seg_desc ss_desc;
uint64_t cr0, rflags, rsp, stack_gla, stack_gpa, val;
int error, size, stackaddrsize, pushop;
uint32_t err_code = 0;
uint32_t err_code = 0U;
memset(&ss_desc, 0, sizeof(ss_desc));
val = 0;
val = 0UL;
size = vie->opsize;
pushop = (vie->op.op_type == VIE_OP_TYPE_PUSH) ? 1 : 0;
@@ -1654,7 +1654,7 @@ vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
*/
if (cpu_mode == CPU_MODE_64BIT && seg != VM_REG_GUEST_FS &&
seg != VM_REG_GUEST_GS) {
segbase = 0;
segbase = 0UL;
} else {
segbase = desc->base;
}
@@ -1728,22 +1728,22 @@ segment_override(uint8_t x, int *seg)
{
switch (x) {
case 0x2E:
case 0x2EU:
*seg = VM_REG_GUEST_CS;
break;
case 0x36:
case 0x36U:
*seg = VM_REG_GUEST_SS;
break;
case 0x3E:
case 0x3EU:
*seg = VM_REG_GUEST_DS;
break;
case 0x26:
case 0x26U:
*seg = VM_REG_GUEST_ES;
break;
case 0x64:
case 0x64U:
*seg = VM_REG_GUEST_FS;
break;
case 0x65:
case 0x65U:
*seg = VM_REG_GUEST_GS;
break;
default:
@@ -1761,16 +1761,16 @@ decode_prefixes(struct vie *vie, enum vm_cpu_mode cpu_mode, int cs_d)
if (vie_peek(vie, &x) != 0)
return -1;
if (x == 0x66)
vie->opsize_override = 1;
else if (x == 0x67)
vie->addrsize_override = 1;
else if (x == 0xF3)
vie->repz_present = 1;
else if (x == 0xF2)
vie->repnz_present = 1;
if (x == 0x66U)
vie->opsize_override = 1U;
else if (x == 0x67U)
vie->addrsize_override = 1U;
else if (x == 0xF3U)
vie->repz_present = 1U;
else if (x == 0xF2U)
vie->repnz_present = 1U;
else if (segment_override(x, &vie->segment_register))
vie->segment_override = 1;
vie->segment_override = 1U;
else
break;
@@ -2099,7 +2099,7 @@ decode_immediate(struct vie *vie)
static int
decode_moffset(struct vie *vie)
{
int i, n;
int32_t i, n;
uint8_t x;
union {
char buf[8];
@@ -2116,7 +2116,7 @@ decode_moffset(struct vie *vie)
n = vie->addrsize;
ASSERT(n == 2 || n == 4 || n == 8, "invalid moffset bytes: %d", n);
u.u64 = 0;
u.u64 = 0UL;
for (i = 0; i < n; i++) {
if (vie_peek(vie, &x) != 0)
return -1;

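In the instruction-emulation hunks above, op_byte is a uint8_t, so the case labels and comparisons gain a U suffix. After integer promotion the comparison happens in int either way; the suffix makes the intended type visible to the checker and the reader. A minimal sketch:

#include <stdint.h>

/* case labels suffixed with U to match the unsigned opcode byte */
static int is_mov(uint8_t op_byte)
{
	switch (op_byte) {
	case 0x88U:
	case 0x89U:
	case 0x8AU:
	case 0x8BU:
		return 1;
	default:
		return 0;
	}
}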
@@ -190,12 +190,12 @@ static int encode_vmcs_seg_desc(int seg, uint32_t *base, uint32_t *lim,
case VM_REG_GUEST_IDTR:
*base = VMX_GUEST_IDTR_BASE;
*lim = VMX_GUEST_IDTR_LIMIT;
*acc = 0xffffffff;
*acc = 0xffffffffU;
break;
case VM_REG_GUEST_GDTR:
*base = VMX_GUEST_GDTR_BASE;
*lim = VMX_GUEST_GDTR_LIMIT;
*acc = 0xffffffff;
*acc = 0xffffffffU;
break;
default:
return -EINVAL;

@@ -37,7 +37,7 @@ struct vie_op {
uint16_t op_flags;
};
#define VIE_INST_SIZE 15
#define VIE_INST_SIZE 15U
struct vie {
uint8_t inst[VIE_INST_SIZE]; /* instruction bytes */
uint8_t num_valid; /* size of the instruction */

@@ -35,10 +35,10 @@ static void vm_setup_cpu_px(struct vm *vm)
{
uint32_t px_data_size;
vm->pm.px_cnt = 0;
vm->pm.px_cnt = 0U;
memset(vm->pm.px_data, 0, MAX_PSTATE * sizeof(struct cpu_px_data));
if ((boot_cpu_data.state_info.px_cnt == 0)
if ((boot_cpu_data.state_info.px_cnt == 0U)
|| (boot_cpu_data.state_info.px_data == NULL)) {
return;
}
@@ -59,10 +59,10 @@ static void vm_setup_cpu_cx(struct vm *vm)
{
uint32_t cx_data_size;
vm->pm.cx_cnt = 0;
vm->pm.cx_cnt = 0U;
memset(vm->pm.cx_data, 0, MAX_CSTATE * sizeof(struct cpu_cx_data));
if ((boot_cpu_data.state_info.cx_cnt == 0)
if ((boot_cpu_data.state_info.cx_cnt == 0U)
|| (boot_cpu_data.state_info.cx_data == NULL)) {
return;
}
@@ -137,12 +137,12 @@ static uint32_t pm1ab_io_read(__unused struct vm_io_handler *hdlr,
{
uint32_t val = io_read(addr, width);
if (host_enter_s3_success == 0) {
if (host_enter_s3_success == 0U) {
/* If host S3 enter failes, we should set BIT_WAK_STS
* bit for vm0 and let vm0 back from S3 failure path.
*/
if (addr == vm->pm.sx_state_data->pm1a_evt.address) {
val |= (1 << BIT_WAK_STS);
val |= (1U << BIT_WAK_STS);
}
}
return val;
@@ -152,9 +152,9 @@ static void pm1ab_io_write(__unused struct vm_io_handler *hdlr,
__unused struct vm *vm, uint16_t addr, size_t width,
uint32_t v)
{
static uint32_t pm1a_cnt_ready = 0;
static uint32_t pm1a_cnt_ready = 0U;
if (width == 2) {
if (width == 2U) {
uint8_t val = get_slp_typx(v);
if ((addr == vm->pm.sx_state_data->pm1a_cnt.address)
@@ -173,9 +173,9 @@ static void pm1ab_io_write(__unused struct vm_io_handler *hdlr,
&& (val == vm->pm.sx_state_data->s3_pkg.val_pm1b)
&& s3_enabled(v)) {
if (pm1a_cnt_ready) {
if (pm1a_cnt_ready != 0U) {
enter_s3(vm, pm1a_cnt_ready, v);
pm1a_cnt_ready = 0;
pm1a_cnt_ready = 0U;
} else {
/* the case broke ACPI spec */
pr_err("PM1B_CNT write error!");
@@ -192,10 +192,10 @@ void register_gas_io_handler(struct vm *vm, struct acpi_generic_address *gas)
uint8_t io_len[5] = {0, 1, 2, 4, 8};
struct vm_io_range gas_io;
if ((gas->address == 0)
if ((gas->address == 0UL)
|| (gas->space_id != SPACE_SYSTEM_IO)
|| (gas->access_size == 0)
|| (gas->access_size > 4))
|| (gas->access_size == 0U)
|| (gas->access_size > 4U))
return;
gas_io.flags = IO_ATTR_RW,

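One change above worth calling out: 1 << BIT_WAK_STS becomes 1U << BIT_WAK_STS. Shifting a plain signed 1 into bit 31 would overflow int, which is undefined behaviour; the unsigned literal keeps the shift well defined for any position up to 31. A sketch, with the bit position assumed for illustration:

#include <stdint.h>

#define BIT_WAK_STS 15U   /* assumed position, for illustration only */

static uint32_t set_wak_sts(uint32_t val)
{
	return val | (1U << BIT_WAK_STS);   /* unsigned shift, no UB */
}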
@@ -29,14 +29,15 @@ void acrn_update_ucode(struct vcpu *vcpu, uint64_t v)
{
uint64_t gva;
struct ucode_header uhdr;
int data_size, data_page_num;
int data_page_num;
size_t data_size;
uint8_t *ucode_ptr, *ptr;
int err;
uint32_t err_code;
gva = v - sizeof(struct ucode_header);
err_code = 0;
err_code = 0U;
err = copy_from_gva(vcpu, &uhdr, gva, sizeof(uhdr), &err_code);
if (err == -EFAULT) {
vcpu_inject_pf(vcpu, gva, err_code);
@@ -52,7 +53,7 @@ void acrn_update_ucode(struct vcpu *vcpu, uint64_t v)
if (ptr == NULL)
return;
err_code = 0;
err_code = 0U;
err = copy_from_gva(vcpu, ucode_ptr, gva, data_size, &err_code);
if (err == -EFAULT) {
vcpu_inject_pf(vcpu, gva, err_code);

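In the microcode-update hunk above, the byte count moves from int to size_t, the type the allocator and copy interfaces take, while error/status values stay signed. A generic sketch, not the hypervisor's actual helper:

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* byte counts are size_t: they cannot go negative, and no implicit
 * conversion happens at the allocation call */
static void *alloc_ucode_buffer(uint32_t total_size)
{
	size_t data_size = (size_t)total_size;   /* explicit widening */

	return malloc(data_size);
}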
@@ -18,8 +18,8 @@ struct list_head vm_list = {
/* Lock for VMs list */
spinlock_t vm_list_lock = {
.head = 0,
.tail = 0
.head = 0U,
.tail = 0U
};
/* used for vmid allocation. And this means the max vm number is 64 */
@@ -62,7 +62,7 @@ struct vm *get_vm_from_vmid(int vm_id)
int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
{
unsigned int id;
uint32_t id;
struct vm *vm;
int status;
@@ -98,15 +98,15 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
goto err1;
}
for (id = 0; id < sizeof(long) * 8; id++)
if (bitmap_test_and_set(id, &vmid_bitmap) == 0)
for (id = 0U; id < (size_t)(sizeof(long) * 8U); id++)
if (!bitmap_test_and_set(id, &vmid_bitmap))
break;
vm->attr.id = vm->attr.boot_idx = id;
atomic_store(&vm->hw.created_vcpus, 0);
/* gpa_lowtop are used for system start up */
vm->hw.gpa_lowtop = 0;
vm->hw.gpa_lowtop = 0UL;
/* Only for SOS: Configure VM software information */
/* For UOS: This VM software information is configure in DM */
if (is_vm0(vm)) {
@@ -309,7 +309,8 @@ void resume_vm_from_s3(struct vm *vm, uint32_t wakeup_vec)
/* Create vm/vcpu for vm0 */
int prepare_vm0(void)
{
int i, ret;
int ret;
uint16_t i;
struct vm *vm = NULL;
struct vm_description *vm_desc = &vm0_desc;
@@ -318,7 +319,7 @@ int prepare_vm0(void)
return ret;
/* Allocate all cpus to vm0 at the beginning */
for (i = 0; i < phys_cpu_num; i++)
for (i = 0U; i < phys_cpu_num; i++)
prepare_vcpu(vm, i);
/* start vm0 BSP automatically */

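The vmid loop above now runs on an unsigned counter and treats the result of bitmap_test_and_set() as a boolean. A self-contained sketch with a simplified stand-in for the hypervisor's bitmap primitive, assuming an LP64 target where long is 64 bits:

#include <stddef.h>
#include <stdint.h>

static uint64_t vmid_bitmap;   /* one bit per allocated VM id */

/* simplified stand-in for bitmap_test_and_set(): sets bit `id`
 * and reports whether it was already set */
static int test_and_set(uint32_t id, uint64_t *map)
{
	uint64_t mask = 1UL << id;
	int was_set = ((*map & mask) != 0UL);

	*map |= mask;
	return was_set;
}

static uint32_t alloc_vmid(void)
{
	uint32_t id;

	/* the loop bound stays in unsigned arithmetic throughout */
	for (id = 0U; id < (uint32_t)(sizeof(long) * 8U); id++) {
		if (!test_and_set(id, &vmid_bitmap))
			break;
	}
	return id;   /* equals the bit count when the map is full */
}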
@@ -31,10 +31,10 @@ static void enable_msr_interception(uint8_t *bitmap, uint32_t msr)
uint8_t *write_map;
uint8_t value;
/* low MSR */
if (msr < 0x1FFF) {
if (msr < 0x1FFFU) {
read_map = bitmap;
write_map = bitmap + 2048;
} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
} else if ((msr >= 0xc0000000U) && (msr <= 0xc0001fffU)) {
read_map = bitmap + 1024;
write_map = bitmap + 3072;
} else {
@@ -96,7 +96,7 @@ void init_msr_emulation(struct vcpu *vcpu)
msr_bitmap = vcpu->vm->arch_vm.msr_bitmap;
for (i = 0; i < msrs_count; i++)
for (i = 0U; i < msrs_count; i++)
enable_msr_interception(msr_bitmap, emulated_msrs[i]);
enable_msr_interception(msr_bitmap, MSR_IA32_PERF_CTL);
@@ -141,7 +141,7 @@ void init_msr_emulation(struct vcpu *vcpu)
int rdmsr_vmexit_handler(struct vcpu *vcpu)
{
uint32_t msr;
uint64_t v = 0;
uint64_t v = 0UL;
int cur_context = vcpu->arch_vcpu.cur_context;
/* Read the msr value */
@@ -233,7 +233,7 @@ int rdmsr_vmexit_handler(struct vcpu *vcpu)
pr_warn("rdmsr: %lx should not come here!", msr);
}
vcpu_inject_gp(vcpu, 0);
v = 0;
v = 0UL;
break;
}
}

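The byte offsets in enable_msr_interception() above follow the VMX MSR-bitmap layout described in the Intel SDM: one 4 KiB page holding read and write bitmaps for the low (0x0-0x1FFF) and high (0xC0000000-0xC0001FFF) MSR ranges. A sketch of the range selection with unsigned bounds:

#include <stddef.h>
#include <stdint.h>

/* VMX MSR bitmap page layout:
 *   bytes [0,1024)     read bitmap,  low MSRs  0x0 .. 0x1FFF
 *   bytes [1024,2048)  read bitmap,  high MSRs 0xC0000000 .. 0xC0001FFF
 *   bytes [2048,3072)  write bitmap, low MSRs
 *   bytes [3072,4096)  write bitmap, high MSRs */
static uint8_t *msr_read_map(uint8_t *bitmap, uint32_t msr)
{
	if (msr <= 0x1FFFU)            /* unsigned compare throughout */
		return bitmap;
	if ((msr >= 0xC0000000U) && (msr <= 0xC0001FFFU))
		return bitmap + 1024;
	return NULL;                   /* not an intercepted range */
}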
@@ -49,7 +49,7 @@ int32_t acrn_insert_request_wait(struct vcpu *vcpu, struct vhm_request *req);
#define ACRN_REQUEST_TRP_FAULT 6
#define ACRN_REQUEST_VPID_FLUSH 7 /* flush vpid tlb */
#define E820_MAX_ENTRIES 32
#define E820_MAX_ENTRIES 32U
struct e820_mem_params {
uint64_t mem_bottom;

@@ -112,7 +112,7 @@ struct vm_arch {
};
#define CPUID_CHECK_SUBLEAF (1U << 0)
#define MAX_VM_VCPUID_ENTRIES 64
#define MAX_VM_VCPUID_ENTRIES 64U
struct vcpuid_entry {
uint32_t eax;
uint32_t ebx;