HV:MM:add 'U/UL' suffix for unsigned constant value

In the current memory module, there are many constant values
without the U/UL suffix; they are reported as MISRA C violations
by the static analysis tool.

Add the 'U/UL' suffix to unsigned constant values in the memory
module as needed.

Note: In most cases, CPU_PAGE_SIZE (0x1000) is used as an unsigned
integer constant value, so CPU_PAGE_SIZE is defined as an unsigned
integer constant; where a 64-bit value is needed, it is safely
converted to unsigned long in accordance with the MISRA C standard.
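
The short sketch below illustrates the conversion the note relies on.
It is a stand-alone example, not the hypervisor's actual header, and
page_align_up() is a hypothetical helper used only for illustration.

#include <stdint.h>

/* Unsigned int constant, as described in the note above. */
#define CPU_PAGE_SIZE 0x1000U

/* Round an address up to the next page boundary. Because CPU_PAGE_SIZE
 * carries the U suffix, mixing it with a uint64_t operand performs a
 * value-preserving widening to uint64_t rather than an implicit
 * signed-to-unsigned conversion that MISRA C would flag. */
static inline uint64_t page_align_up(uint64_t addr)
{
	return (addr + CPU_PAGE_SIZE - 1U) & ~((uint64_t)CPU_PAGE_SIZE - 1U);
}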

Signed-off-by: Xiangyang Wu <xiangyang.wu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Author: Xiangyang Wu
Date: 2018-07-10 14:37:31 +08:00
Committed by: lijinxia
Parent: b25caad29e
Commit: f81fcf2f07
11 changed files with 41 additions and 41 deletions

@@ -9,14 +9,14 @@
#include "guest/instr_emul_wrapper.h"
#include "guest/instr_emul.h"
-#define ACRN_DBG_EPT 6
+#define ACRN_DBG_EPT 6U
static uint64_t find_next_table(uint32_t table_offset, void *table_base)
{
uint64_t table_entry;
uint64_t table_present;
-uint64_t sub_table_addr = 0;
+uint64_t sub_table_addr = 0UL;
/* Read the table entry */
table_entry = mem_read64(table_base
@@ -31,7 +31,7 @@ static uint64_t find_next_table(uint32_t table_offset, void *table_base)
table_present = (IA32E_EPT_R_BIT | IA32E_EPT_W_BIT | IA32E_EPT_X_BIT);
/* Determine if a valid entry exists */
-if ((table_entry & table_present) == 0) {
+if ((table_entry & table_present) == 0UL) {
/* No entry present */
return sub_table_addr;
}
@@ -111,8 +111,8 @@ void destroy_ept(struct vm *vm)
uint64_t _gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
{
-uint64_t hpa = 0;
-uint32_t pg_size = 0;
+uint64_t hpa = 0UL;
+uint32_t pg_size = 0U;
struct entry_params entry;
struct map_params map_params;
@@ -221,7 +221,7 @@ int register_mmio_emulation_handler(struct vm *vm,
if ((read_write != NULL) && (end > start)) {
/* Allocate memory for node */
mmio_node =
-(struct mem_io_node *)calloc(1, sizeof(struct mem_io_node));
+(struct mem_io_node *)calloc(1U, sizeof(struct mem_io_node));
/* Ensure memory successfully allocated */
if (mmio_node != NULL) {
@@ -460,7 +460,7 @@ int ept_misconfig_vmexit_handler(__unused struct vcpu *vcpu)
ASSERT(status == 0, "EPT Misconfiguration is not handled.\n");
-TRACE_2L(TRACE_VMEXIT_EPT_MISCONFIGURATION, 0, 0);
+TRACE_2L(TRACE_VMEXIT_EPT_MISCONFIGURATION, 0UL, 0UL);
return status;
}

@@ -165,12 +165,12 @@ void flush_vpid_single(int vpid)
if (vpid == 0)
return;
-_invvpid(VMX_VPID_TYPE_SINGLE_CONTEXT, vpid, 0);
+_invvpid(VMX_VPID_TYPE_SINGLE_CONTEXT, vpid, 0UL);
}
void flush_vpid_global(void)
{
-_invvpid(VMX_VPID_TYPE_ALL_CONTEXT, 0, 0);
+_invvpid(VMX_VPID_TYPE_ALL_CONTEXT, 0, 0UL);
}
void invept(struct vcpu *vcpu)
@@ -530,7 +530,7 @@ static void *walk_paging_struct(void *addr, void *table_base,
}
/* Determine if a valid entry exists */
-if ((table_entry & entry_present) == 0) {
+if ((table_entry & entry_present) == 0UL) {
/* No entry present - need to allocate a new table */
sub_table_addr = alloc_paging_struct();
/* Check to ensure memory available for this structure*/
@@ -570,7 +570,7 @@ uint64_t get_paging_pml4(void)
void enable_paging(uint64_t pml4_base_addr)
{
-uint64_t tmp64 = 0;
+uint64_t tmp64 = 0UL;
/* Enable Write Protect, inhibiting writing to read-only pages */
CPU_CR_READ(cr0, &tmp64);
@@ -581,7 +581,7 @@ void enable_paging(uint64_t pml4_base_addr)
void enable_smep(void)
{
-uint64_t val64 = 0;
+uint64_t val64 = 0UL;
/* Enable CR4.SMEP*/
CPU_CR_READ(cr4, &val64);
@@ -665,8 +665,8 @@ void free_paging_struct(void *ptr)
bool check_continuous_hpa(struct vm *vm, uint64_t gpa, uint64_t size)
{
-uint64_t curr_hpa = 0;
-uint64_t next_hpa = 0;
+uint64_t curr_hpa = 0UL;
+uint64_t next_hpa = 0UL;
/* if size <= PAGE_SIZE_4K, it is continuous,no need check
* if size > PAGE_SIZE_4K, need to fetch next page
@@ -687,7 +687,7 @@ int obtain_last_page_table_entry(struct map_params *map_params,
struct entry_params *entry, void *addr, bool direct)
{
uint64_t table_entry;
-uint32_t entry_present = 0;
+uint32_t entry_present = 0U;
int ret = 0;
/* Obtain the PML4 address */
void *table_addr = direct ? (map_params->pml4_base)

@@ -74,10 +74,10 @@ static struct key_info g_key_info = {
static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
uint64_t size, uint64_t gpa_rebased)
{
-uint64_t nworld_pml4e = 0;
-uint64_t sworld_pml4e = 0;
+uint64_t nworld_pml4e = 0UL;
+uint64_t sworld_pml4e = 0UL;
struct map_params map_params;
-uint64_t gpa = 0;
+uint64_t gpa = 0UL;
uint64_t hpa = gpa2hpa(vm, gpa_orig);
uint64_t table_present = (IA32E_EPT_R_BIT |
IA32E_EPT_W_BIT |
@@ -109,7 +109,7 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
/* Unmap gpa_orig~gpa_orig+size from guest normal world ept mapping */
map_params.pml4_base = HPA2HVA(vm->arch_vm.nworld_eptp);
-unmap_mem(&map_params, (void *)hpa, (void *)gpa_orig, size, 0);
+unmap_mem(&map_params, (void *)hpa, (void *)gpa_orig, size, 0U);
/* Copy PDPT entries from Normal world to Secure world
* Secure world can access Normal World's memory,
@@ -350,7 +350,7 @@ static bool setup_trusty_info(struct vcpu *vcpu,
BUP_MKHI_BOOTLOADER_SEED_LEN,
g_key_info.dseed_list[i].seed,
BUP_MKHI_BOOTLOADER_SEED_LEN,
-NULL, 0,
+NULL, 0U,
vcpu->vm->GUID, sizeof(vcpu->vm->GUID)) == 0) {
(void)memset(key_info, 0, sizeof(struct key_info));
pr_err("%s: derive dvseed failed!", __func__);
@@ -495,7 +495,7 @@ void trusty_set_dseed(void *dseed, uint8_t dseed_num)
if ((dseed == NULL) || (dseed_num == 0U) ||
(dseed_num > BOOTLOADER_SEED_MAX_ENTRIES)) {
-g_key_info.num_seeds = 1;
+g_key_info.num_seeds = 1U;
(void)memset(g_key_info.dseed_list[0].seed, 0xA5,
sizeof(g_key_info.dseed_list[0].seed));
return;