HV:misc:add suffix U to the numeric constant
Add suffix U to the numeric constant

Signed-off-by: Huihuang Shi <huihuang.shi@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in: parent d3ad411c91, commit 96372ed09c
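Why the suffix matters: an unsuffixed integer constant such as 0 has type int, so comparing or combining it with an unsigned variable (as in the many "timeout != 0" and "i = 0" sites below) mixes signed and unsigned operands, which MISRA C-style rules flag and which this commit cleans up by writing 0U/0UL. A minimal standalone sketch of the difference (hypothetical values, not code from this commit):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t timeout = 10U;

        /* "timeout != 0" compares an unsigned variable with the signed
         * constant 0; writing 0U keeps both operands in the unsigned
         * type category, which is what this commit enforces.
         */
        while (timeout != 0U)
            timeout--;

        /* The shift case is sharper: 1 << 31 overflows a signed int
         * (undefined behavior), while 1U << 31 is well defined.
         */
        printf("msb = 0x%08x, timeout = %u\n", 1U << 31, timeout);
        return 0;
    }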
@@ -947,8 +947,8 @@ static void get_entry_info(struct ptdev_remapping_info *entry, char *type,
     } else {
         strcpy_s(type, 16, "NONE");
         *irq = IRQ_INVALID;
-        *vector = 0;
-        *dest = 0;
+        *vector = 0U;
+        *dest = 0UL;
         *lvl_tm = 0;
         *pin = -1;
         *vpin = -1;
@@ -13,13 +13,13 @@
 #endif

 spinlock_t trampoline_spinlock = {
-    .head = 0,
-    .tail = 0
+    .head = 0U,
+    .tail = 0U
 };

 spinlock_t up_count_spinlock = {
-    .head = 0,
-    .tail = 0
+    .head = 0U,
+    .tail = 0U
 };

 struct per_cpu_region *per_cpu_data_base_ptr;
@@ -87,7 +87,7 @@ static inline bool get_monitor_cap(void)
      * in hypervisor, but still expose it to the guests and
      * let them handle it correctly
      */
-    if (boot_cpu_data.x86 != 0x6 || boot_cpu_data.x86_model != 0x5c)
+    if (boot_cpu_data.x86 != 0x6U || boot_cpu_data.x86_model != 0x5cU)
         return true;
 }

@@ -188,8 +188,8 @@ static int hardware_detect_support(void)
         pr_fatal("%s, LM not supported\n", __func__);
         return -ENODEV;
     }
-    if ((boot_cpu_data.x86_phys_bits == 0) ||
-        (boot_cpu_data.x86_virt_bits == 0)) {
+    if ((boot_cpu_data.x86_phys_bits == 0U) ||
+        (boot_cpu_data.x86_virt_bits == 0U)) {
         pr_fatal("%s, can't detect Linear/Physical Address size\n",
             __func__);
         return -ENODEV;
@@ -255,7 +255,7 @@ uint16_t __attribute__((weak)) parse_madt(uint8_t *lapic_id_base)
     static const uint8_t lapic_id[] = {0U, 2U, 4U, 6U};
     uint32_t i;

-    for (i = 0; i < ARRAY_SIZE(lapic_id); i++)
+    for (i = 0U; i < ARRAY_SIZE(lapic_id); i++)
         *lapic_id_base++ = lapic_id[i];

     return ARRAY_SIZE(lapic_id);
@@ -729,7 +729,7 @@ void start_cpus()
      * configured time-out has expired
      */
     timeout = CONFIG_CPU_UP_TIMEOUT * 1000;
-    while ((up_count != expected_up) && (timeout != 0)) {
+    while ((up_count != expected_up) && (timeout != 0U)) {
         /* Delay 10us */
         udelay(10);

@@ -762,7 +762,7 @@ void stop_cpus()
     }

     expected_up = 1;
-    while ((up_count != expected_up) && (timeout !=0)) {
+    while ((up_count != expected_up) && (timeout != 0U)) {
         /* Delay 10us */
         udelay(10);

@@ -875,11 +875,11 @@ static void vapic_cap_detect(void)
     uint8_t features;
     uint64_t msr_val;

-    features = 0;
+    features = 0U;

     msr_val = msr_read(MSR_IA32_VMX_PROCBASED_CTLS);
     if (!is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS_TPR_SHADOW)) {
-        cpu_caps.vapic_features = 0;
+        cpu_caps.vapic_features = 0U;
         return;
     }
     features |= VAPIC_FEATURE_TPR_SHADOW;
@@ -57,13 +57,13 @@ void free_ept_mem(void *pml4_addr)
         return;
     }

-    for (pml4_index = 0; pml4_index < IA32E_NUM_ENTRIES; pml4_index++) {
+    for (pml4_index = 0U; pml4_index < IA32E_NUM_ENTRIES; pml4_index++) {
         /* Walk from the PML4 table to the PDPT table */
         pdpt_addr = HPA2HVA(find_next_table(pml4_index, pml4_addr));
         if (pdpt_addr == NULL)
             continue;

-        for (pdpt_index = 0; pdpt_index < IA32E_NUM_ENTRIES;
+        for (pdpt_index = 0U; pdpt_index < IA32E_NUM_ENTRIES;
                 pdpt_index++) {
             /* Walk from the PDPT table to the PD table */
             pde_addr = HPA2HVA(find_next_table(pdpt_index,
@@ -72,7 +72,7 @@ void free_ept_mem(void *pml4_addr)
             if (pde_addr == NULL)
                 continue;

-            for (pde_index = 0; pde_index < IA32E_NUM_ENTRIES;
+            for (pde_index = 0U; pde_index < IA32E_NUM_ENTRIES;
                     pde_index++) {
                 /* Walk from the PD table to the page table */
                 pte_addr = HPA2HVA(find_next_table(pde_index,
@@ -105,7 +105,7 @@ void destroy_ept(struct vm *vm)
      */
     if (vm->sworld_control.sworld_enabled && (vm->arch_vm.sworld_eptp != 0U)) {
         free_ept_mem(HPA2HVA(vm->arch_vm.sworld_eptp));
-        vm->arch_vm.sworld_eptp = 0;
+        vm->arch_vm.sworld_eptp = 0UL;
     }
 }

@@ -319,7 +319,7 @@ static int dm_emulate_mmio_pre(struct vcpu *vcpu, uint64_t exit_qual)
         vcpu->req.type = REQ_WP;
     }

-    if (vcpu->req.type == 0)
+    if (vcpu->req.type == 0U)
         vcpu->req.type = REQ_MMIO;
     vcpu->req.reqs.mmio_request.direction = vcpu->mmio.read_write;
     vcpu->req.reqs.mmio_request.address = (long)vcpu->mmio.paddr;
@@ -349,7 +349,7 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu)
         /* TODO: Need to figure out how to determine value being
          * written
          */
-        mmio->value = 0;
+        mmio->value = 0UL;
     } else {
         /* Read operation */
         mmio->read_write = HV_MEM_IO_READ;
@@ -358,7 +358,7 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu)
         /* TODO: Need to determine how sign extension is determined for
          * reads
          */
-        mmio->sign_extend_read = 0;
+        mmio->sign_extend_read = 0U;
     }

     /* Get the guest physical address */
@@ -37,13 +37,13 @@ void load_gdtr_and_tr(void)
     tss->ist1 = (uint64_t)get_cpu_var(mc_stack) + CONFIG_STACK_SIZE;
     tss->ist2 = (uint64_t)get_cpu_var(df_stack) + CONFIG_STACK_SIZE;
     tss->ist3 = (uint64_t)get_cpu_var(sf_stack) + CONFIG_STACK_SIZE;
-    tss->ist4 = 0L;
+    tss->ist4 = 0UL;

     /* tss descriptor */
     set_tss_desc(&gdt->host_gdt_tss_descriptors,
         (void *)tss, sizeof(struct tss_64), TSS_AVAIL);

-    gdtr.len = sizeof(struct host_gdt) - 1;
+    gdtr.len = sizeof(struct host_gdt) - 1U;
     gdtr.gdt = gdt;

     asm volatile ("lgdt %0" ::"m"(gdtr));
@@ -12,7 +12,7 @@ int dm_emulate_pio_post(struct vcpu *vcpu)
     int cur_context = vcpu->arch_vcpu.cur_context;
     union vhm_request_buffer *req_buf = NULL;
     uint32_t mask =
-        0xFFFFFFFFul >> (32 - 8 * vcpu->req.reqs.pio_request.size);
+        0xFFFFFFFFUL >> (32 - 8 * vcpu->req.reqs.pio_request.size);
     uint64_t *rax;

     req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
@@ -164,7 +164,7 @@ void allow_guest_io_access(struct vm *vm, uint32_t address, uint32_t nbytes)
     uint32_t a;

     b = vm->arch_vm.iobitmap[0];
-    for (i = 0; i < nbytes; i++) {
+    for (i = 0U; i < nbytes; i++) {
         if ((address & 0x8000U) != 0U)
             b = vm->arch_vm.iobitmap[1];
         a = address & 0x7fffU;
@@ -180,11 +180,11 @@ static void deny_guest_io_access(struct vm *vm, uint32_t address, uint32_t nbyte
     uint32_t a;

     b = vm->arch_vm.iobitmap[0];
-    for (i = 0; i < nbytes; i++) {
+    for (i = 0U; i < nbytes; i++) {
         if ((address & 0x8000U) != 0U)
             b = vm->arch_vm.iobitmap[1];
         a = address & 0x7fffU;
-        b[a >> 5] |= (1 << (a & 0x1fU));
+        b[a >> 5U] |= (1U << (a & 0x1fU));
         address++;
     }
 }
@@ -296,13 +296,13 @@ static uint32_t map_mem_region(void *vaddr, void *paddr,
     default:

         /* Set mapping size to 0 - can't map memory in PML4 */
-        mapped_size = 0;
+        mapped_size = 0U;

         break;
     }

     /* Check to see if mapping should occur */
-    if (mapped_size != 0) {
+    if (mapped_size != 0U) {
         /* Get current table entry */
         uint64_t entry = MEM_READ64(table_base + table_offset);
         bool prev_entry_present = false;
@@ -415,7 +415,7 @@ static uint32_t map_mem_region(void *vaddr, void *paddr,
          * TODO: add shootdown APs operation if MMU will be
          * modified after AP start in the future.
          */
-        if ((phys_cpu_num != 0) &&
+        if ((phys_cpu_num != 0U) &&
             ((pcpu_active_bitmap &
             ((1UL << phys_cpu_num) - 1))
             != (1UL << CPU_BOOT_ID))) {
@@ -620,7 +620,7 @@ void init_paging(void)
             attr_uc);

     /* Modify WB attribute for E820_TYPE_RAM */
-    for (i = 0, entry = &e820[0];
+    for (i = 0U, entry = &e820[0];
             i < e820_entries;
             i++, entry = &e820[i]) {
         if (entry->type == E820_TYPE_RAM) {
@@ -864,10 +864,10 @@ static uint64_t update_page_table_entry(struct map_params *map_params,
 static uint64_t break_page_table(struct map_params *map_params, void *paddr,
         void *vaddr, uint64_t page_size, bool direct)
 {
-    uint32_t i = 0;
+    uint32_t i = 0U;
     uint64_t pa;
-    uint64_t attr = 0x00;
-    uint64_t next_page_size = 0x00;
+    uint64_t attr = 0x0UL;
+    uint64_t next_page_size = 0x0UL;
     void *sub_tab_addr = NULL;
     struct entry_params entry;

@@ -930,7 +930,7 @@ static uint64_t break_page_table(struct map_params *map_params, void *paddr,
         attr |= (entry.entry_val & 0x7fUL);
     }
     /* write all entries and keep original attr*/
-    for (i = 0; i < IA32E_NUM_ENTRIES; i++) {
+    for (i = 0U; i < IA32E_NUM_ENTRIES; i++) {
         MEM_WRITE64(sub_tab_addr + (i * IA32E_COMM_ENTRY_SIZE),
             (attr | (pa + (i * next_page_size))));
     }
@@ -1033,7 +1033,7 @@ static int modify_paging(struct map_params *map_params, void *paddr,
              */
             page_size = break_page_table(map_params,
                 paddr, vaddr, page_size, direct);
-            if (page_size == 0)
+            if (page_size == 0UL)
                 return -EINVAL;
         }
     } else {
@@ -1043,7 +1043,7 @@ static int modify_paging(struct map_params *map_params, void *paddr,
         /* The function return the memory size that one entry can map */
         adjust_size = update_page_table_entry(map_params, paddr, vaddr,
             page_size, attr, request_type, direct);
-        if (adjust_size == 0)
+        if (adjust_size == 0UL)
             return -EINVAL;
         vaddr += adjust_size;
         paddr += adjust_size;
@@ -46,7 +46,7 @@ void setup_notification(void)
     char name[32] = {0};

     cpu = get_cpu_id();
-    if (cpu > 0)
+    if (cpu > 0U)
         return;

     /* support IPI notification, VM0 will register all CPU */
@@ -21,7 +21,7 @@ void init_softirq(void)
     uint16_t pcpu_id;

     for (pcpu_id = 0U; pcpu_id < phys_cpu_num; pcpu_id++) {
-        per_cpu(softirq_pending, pcpu_id) = 0;
+        per_cpu(softirq_pending, pcpu_id) = 0UL;
         bitmap_set(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, pcpu_id));
     }
 }
@@ -28,9 +28,9 @@ struct trusty_mem {

 static struct key_info g_key_info = {
     .size_of_this_struct = sizeof(g_key_info),
-    .version = 0,
-    .platform = 3,
-    .num_seeds = 1
+    .version = 0U,
+    .platform = 3U,
+    .num_seeds = 1U
 };

 #define save_segment(seg, SEG_NAME) \
@@ -71,7 +71,7 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
     }

     if (!vm->sworld_control.sworld_enabled
-            || vm->arch_vm.sworld_eptp != 0) {
+            || vm->arch_vm.sworld_eptp != 0UL) {
         pr_err("Sworld is not enabled or Sworld eptp is not NULL");
         return;
     }
@@ -323,7 +323,7 @@ static bool setup_trusty_info(struct vcpu *vcpu,
         sizeof(mem->first_page.data.key_info.dseed_list));
     /* Derive dvseed from dseed for Trusty */
     key_info = &mem->first_page.data.key_info;
-    for (i = 0; i < g_key_info.num_seeds; i++) {
+    for (i = 0U; i < g_key_info.num_seeds; i++) {
         if (hkdf_sha256(key_info->dseed_list[i].seed,
                 BUP_MKHI_BOOTLOADER_SEED_LEN,
                 g_key_info.dseed_list[i].seed,
@@ -364,11 +364,11 @@ static bool init_secure_world_env(struct vcpu *vcpu,
         uint64_t base_hpa,
         uint32_t size)
 {
-    vcpu->arch_vcpu.inst_len = 0;
+    vcpu->arch_vcpu.inst_len = 0U;
     vcpu->arch_vcpu.contexts[SECURE_WORLD].rip = entry_gpa;
     vcpu->arch_vcpu.contexts[SECURE_WORLD].rsp =
         TRUSTY_EPT_REBASE_GPA + size;
-    vcpu->arch_vcpu.contexts[SECURE_WORLD].tsc_offset = 0;
+    vcpu->arch_vcpu.contexts[SECURE_WORLD].tsc_offset = 0UL;

     vcpu->arch_vcpu.contexts[SECURE_WORLD].cr0 =
         vcpu->arch_vcpu.contexts[NORMAL_WORLD].cr0;
@@ -412,12 +412,12 @@ bool initialize_trusty(struct vcpu *vcpu, uint64_t param)
         return false;
     }

-    if (boot_param->entry_point == 0) {
+    if (boot_param->entry_point == 0U) {
         pr_err("%s: Invalid entry point\n", __func__);
         return false;
     }

-    if (boot_param->base_addr == 0) {
+    if (boot_param->base_addr == 0U) {
         pr_err("%s: Invalid memory base address\n", __func__);
         return false;
     }
@@ -451,7 +451,7 @@ bool initialize_trusty(struct vcpu *vcpu, uint64_t param)
 void trusty_set_dseed(void *dseed, uint8_t dseed_num)
 {
     /* Use fake seed if input param is invalid */
-    if ((dseed == NULL) || (dseed_num == 0) ||
+    if ((dseed == NULL) || (dseed_num == 0U) ||
         (dseed_num > BOOTLOADER_SEED_MAX_ENTRIES)) {

         g_key_info.num_seeds = 1;
@@ -307,7 +307,7 @@ static int xsetbv_vmexit_handler(struct vcpu *vcpu)

     val64 = exec_vmread(VMX_GUEST_CR4);
     if ((val64 & CR4_OSXSAVE) == 0U) {
-        vcpu_inject_gp(vcpu, 0);
+        vcpu_inject_gp(vcpu, 0U);
         return -1;
     }

@@ -311,12 +311,12 @@ int vmx_wrmsr_pat(struct vcpu *vcpu, uint64_t value)
     struct run_context *context =
         &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];

-    for (i = 0; i < 8; i++) {
-        field = (value >> (i * 8)) & 0xffU;
+    for (i = 0U; i < 8U; i++) {
+        field = (value >> (i * 8U)) & 0xffU;
         if ((PAT_MEM_TYPE_INVALID(field) ||
                 (PAT_FIELD_RSV_BITS & field) != 0U)) {
             pr_err("invalid guest IA32_PAT: 0x%016llx", value);
-            vcpu_inject_gp(vcpu, 0);
+            vcpu_inject_gp(vcpu, 0U);
             return -EINVAL;
         }
     }
@@ -364,7 +364,7 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)

     if ((cr0 & (cr0_always_off_mask | CR0_RESERVED_MASK)) != 0U) {
         pr_err("Not allow to set always off / reserved bits for CR0");
-        vcpu_inject_gp(vcpu, 0);
+        vcpu_inject_gp(vcpu, 0U);
         return -EINVAL;
     }

@@ -375,7 +375,7 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
             !paging_enabled && ((cr0 & CR0_PG) != 0U)) {
         if ((context->cr4 & CR4_PAE) == 0U) {
             pr_err("Can't enable long mode when PAE disabled");
-            vcpu_inject_gp(vcpu, 0);
+            vcpu_inject_gp(vcpu, 0U);
             return -EINVAL;
         }
         /* Enable long mode */
@@ -402,7 +402,7 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
     if (((context->cr0 ^ cr0) & (CR0_CD | CR0_NW)) != 0U) {
         if ((cr0 & CR0_CD) == 0U && ((cr0 & CR0_NW) != 0U)) {
             pr_err("not allow to set CR0.NW while clearing CR0.CD");
-            vcpu_inject_gp(vcpu, 0);
+            vcpu_inject_gp(vcpu, 0U);
             return -EINVAL;
         }

@@ -499,14 +499,14 @@ int vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4)
     /* Check if guest try to set fixed to 0 bits or reserved bits */
     if((cr4 & cr4_always_off_mask) != 0U) {
         pr_err("Not allow to set reserved/always off bits for CR4");
-        vcpu_inject_gp(vcpu, 0);
+        vcpu_inject_gp(vcpu, 0U);
         return -EINVAL;
     }

     /* Do NOT support nested guest */
     if ((cr4 & CR4_VMXE) != 0U) {
         pr_err("Nested guest not supported");
-        vcpu_inject_gp(vcpu, 0);
+        vcpu_inject_gp(vcpu, 0U);
         return -EINVAL;
     }
@@ -601,10 +601,10 @@ static void init_guest_state(struct vcpu *vcpu)
          * if it's resume from S3.
          *
          */
-        if ((uint64_t)vcpu->entry_addr < 0x100000) {
-            sel =((uint64_t)vcpu->entry_addr & 0xFFFF0)
-                >> 4;
-            base = sel << 4;
+        if ((uint64_t)vcpu->entry_addr < 0x100000UL) {
+            sel =((uint64_t)vcpu->entry_addr & 0xFFFF0UL)
+                >> 4UL;
+            base = sel << 4U;
         } else {
             /* BSP is initialized with real mode */
             sel = REAL_MODE_BSP_INIT_CODE_SEL;
@@ -618,21 +618,21 @@ static void init_guest_state(struct vcpu *vcpu)
             /* AP is initialized with real mode
              * and CS value is left shift 8 bits from sipi vector;
              */
-            sel = vcpu->arch_vcpu.sipi_vector << 8;
-            base = sel << 4;
+            sel = vcpu->arch_vcpu.sipi_vector << 8U;
+            base = sel << 4U;
         }
-        limit = 0xffff;
+        limit = 0xffffU;
         access = REAL_MODE_CODE_SEG_AR;
     } else if (vcpu_mode == CPU_MODE_PROTECTED) {
-        limit = 0xffffffff;
-        base = 0;
+        limit = 0xffffffffU;
+        base = 0U;
         access = PROTECTED_MODE_CODE_SEG_AR;
-        sel = 0x10; /* Linear CS selector in guest init gdt */
+        sel = 0x10U; /* Linear CS selector in guest init gdt */
     } else {
         HV_ARCH_VMX_GET_CS(sel);
         access = get_cs_access_rights();
-        limit = 0xffffffff;
-        base = 0;
+        limit = 0xffffffffU;
+        base = 0U;
     }

     /* Selector */
@@ -660,14 +660,14 @@ static void init_guest_state(struct vcpu *vcpu)
     /***************************************************/
     /* Set up guest instruction pointer */
     field = VMX_GUEST_RIP;
-    value32 = 0;
+    value32 = 0U;
     if (vcpu_mode == CPU_MODE_REAL) {
         /* RIP is set here */
         if (is_vcpu_bsp(vcpu)) {
-            if ((uint64_t)vcpu->entry_addr < 0x100000)
-                value32 = (uint64_t)vcpu->entry_addr & 0x0F;
+            if ((uint64_t)vcpu->entry_addr < 0x100000UL)
+                value32 = (uint64_t)vcpu->entry_addr & 0x0FUL;
             else
-                value32 = 0x0000FFF0;
+                value32 = 0x0000FFF0U;
         }
     } else
         value32 = (uint32_t)((uint64_t)vcpu->entry_addr);
@@ -678,7 +678,7 @@ static void init_guest_state(struct vcpu *vcpu)
     if (vcpu_mode == CPU_MODE_64BIT) {
         /* Set up guest stack pointer to 0 */
         field = VMX_GUEST_RSP;
-        value32 = 0;
+        value32 = 0U;
         pr_dbg("GUEST RSP on VMEntry %x ",
             value32);
         exec_vmwrite(field, value32);
@@ -691,7 +691,7 @@ static void init_guest_state(struct vcpu *vcpu)
     /* GDTR - Global Descriptor Table */
     if (vcpu_mode == CPU_MODE_REAL) {
         /* Base */
-        base = 0;
+        base = 0U;

         /* Limit */
         limit = 0xFFFF;
@@ -712,7 +712,7 @@ static void init_guest_state(struct vcpu *vcpu)
         base = gdtb.base;

         /* Limit */
-        limit = HOST_GDT_SIZE - 1;
+        limit = HOST_GDT_SIZE - 1U;
     }

     /* GDTR Base */
@@ -729,7 +729,7 @@ static void init_guest_state(struct vcpu *vcpu)
     if ((vcpu_mode == CPU_MODE_REAL) ||
             (vcpu_mode == CPU_MODE_PROTECTED)) {
         /* Base */
-        base = 0;
+        base = 0U;

         /* Limit */
         limit = 0xFFFF;
@@ -778,14 +778,14 @@ static void init_guest_state(struct vcpu *vcpu)
     } else if (vcpu_mode == CPU_MODE_PROTECTED) {
         /* Linear data segment in guest init gdt */
         es = ss = ds = fs = gs = 0x18;
-        limit = 0xffffffff;
+        limit = 0xffffffffU;
     } else if (vcpu_mode == CPU_MODE_64BIT) {
         asm volatile ("movw %%es, %%ax":"=a" (es));
         asm volatile ("movw %%ss, %%ax":"=a" (ss));
         asm volatile ("movw %%ds, %%ax":"=a" (ds));
         asm volatile ("movw %%fs, %%ax":"=a" (fs));
         asm volatile ("movw %%gs, %%ax":"=a" (gs));
-        limit = 0xffffffff;
+        limit = 0xffffffffU;
     }

     /* Selector */
@@ -849,7 +849,7 @@ static void init_guest_state(struct vcpu *vcpu)
     pr_dbg("VMX_GUEST_GS_ATTR: 0x%x ", value32);

     /* Base */
-    value = 0;
+    value = 0UL;
     field = VMX_GUEST_ES_BASE;
     exec_vmwrite(field, es << 4);
     pr_dbg("VMX_GUEST_ES_BASE: 0x%016llx ", value);
@@ -875,17 +875,17 @@ static void init_guest_state(struct vcpu *vcpu)
     pr_dbg("VMX_GUEST_LDTR_SEL: 0x%x ", value32);

     field = VMX_GUEST_LDTR_LIMIT;
-    value32 = 0xffffffff;
+    value32 = 0xffffffffU;
     exec_vmwrite(field, value32);
     pr_dbg("VMX_GUEST_LDTR_LIMIT: 0x%x ", value32);

     field = VMX_GUEST_LDTR_ATTR;
-    value32 = 0x10000;
+    value32 = 0x10000U;
     exec_vmwrite(field, value32);
     pr_dbg("VMX_GUEST_LDTR_ATTR: 0x%x ", value32);

     field = VMX_GUEST_LDTR_BASE;
-    value32 = 0x00;
+    value32 = 0x00U;
     exec_vmwrite(field, value32);
     pr_dbg("VMX_GUEST_LDTR_BASE: 0x%x ", value32);

@@ -896,34 +896,34 @@ static void init_guest_state(struct vcpu *vcpu)
     pr_dbg("VMX_GUEST_TR_SEL: 0x%x ", value32);

     field = VMX_GUEST_TR_LIMIT;
-    value32 = 0xff;
+    value32 = 0xffU;
     exec_vmwrite(field, value32);
     pr_dbg("VMX_GUEST_TR_LIMIT: 0x%x ", value32);

     field = VMX_GUEST_TR_ATTR;
-    value32 = 0x8b;
+    value32 = 0x8bU;
     exec_vmwrite(field, value32);
     pr_dbg("VMX_GUEST_TR_ATTR: 0x%x ", value32);

     field = VMX_GUEST_TR_BASE;
-    value32 = 0x00;
+    value32 = 0x00U;
     exec_vmwrite(field, value32);
     pr_dbg("VMX_GUEST_TR_BASE: 0x%x ", value32);

     field = VMX_GUEST_INTERRUPTIBILITY_INFO;
-    value32 = 0;
+    value32 = 0U;
     exec_vmwrite(field, value32);
     pr_dbg("VMX_GUEST_INTERRUPTIBILITY_INFO: 0x%x ",
         value32);

     field = VMX_GUEST_ACTIVITY_STATE;
-    value32 = 0;
+    value32 = 0U;
     exec_vmwrite(field, value32);
     pr_dbg("VMX_GUEST_ACTIVITY_STATE: 0x%x ",
         value32);

     field = VMX_GUEST_SMBASE;
-    value32 = 0;
+    value32 = 0U;
     exec_vmwrite(field, value32);
     pr_dbg("VMX_GUEST_SMBASE: 0x%x ", value32);

@@ -941,14 +941,14 @@ static void init_guest_state(struct vcpu *vcpu)
     pr_dbg("VMX_GUEST_IA32_PAT: 0x%016llx ",
         value64);

-    value64 = 0;
+    value64 = 0UL;
     exec_vmwrite64(VMX_GUEST_IA32_DEBUGCTL_FULL, value64);
     pr_dbg("VMX_GUEST_IA32_DEBUGCTL: 0x%016llx ",
         value64);

     /* Set up guest pending debug exception */
     field = VMX_GUEST_PENDING_DEBUG_EXCEPT;
-    value = 0x0;
+    value = 0x0UL;
     exec_vmwrite(field, value);
     pr_dbg("VMX_GUEST_PENDING_DEBUG_EXCEPT: 0x%016llx ", value);
@@ -1305,7 +1305,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
      * on corresponding guest * exception - pg 2902 24.6.3
      * enable VM exit on MC only
      */
-    value32 = (1 << IDT_MC);
+    value32 = (1U << IDT_MC);
     exec_vmwrite(VMX_EXCEPTION_BITMAP, value32);

     /* Set up page fault error code mask - second paragraph * pg 2902
@@ -180,7 +180,7 @@ static int register_hrhd_units(void)
         return -1;
     }

-    for (i = 0; i < info->drhd_count; i++) {
+    for (i = 0U; i < info->drhd_count; i++) {
         drhd_rt = calloc(1, sizeof(struct dmar_drhd_rt));
         ASSERT(drhd_rt != NULL, "");
         drhd_rt->drhd = &info->drhd_units[i];
@@ -235,7 +235,7 @@ static void iommu_flush_cache(struct dmar_drhd_rt *dmar_uint,
     if (iommu_ecap_c(dmar_uint->ecap) != 0U)
         return;

-    for (i = 0; i < size; i += CACHE_LINE_SIZE)
+    for (i = 0U; i < size; i += CACHE_LINE_SIZE)
         clflush((char *)p + i);
 }

@@ -339,7 +339,7 @@ dmar_unit_support_aw(struct dmar_drhd_rt *dmar_uint, uint32_t addr_width)

     aw = (uint8_t)width_to_agaw(addr_width);

-    return ((1 << aw) & iommu_cap_sagaw(dmar_uint->cap)) != 0;
+    return ((1U << aw) & iommu_cap_sagaw(dmar_uint->cap)) != 0;
 }

 static void dmar_enable_translation(struct dmar_drhd_rt *dmar_uint)
@@ -425,8 +425,8 @@ static void dmar_register_hrhd(struct dmar_drhd_rt *dmar_uint)

     dmar_uint->max_domain_id = iommu_cap_ndoms(dmar_uint->cap) - 1;

-    if (dmar_uint->max_domain_id > 63)
-        dmar_uint->max_domain_id = 63;
+    if (dmar_uint->max_domain_id > 63U)
+        dmar_uint->max_domain_id = 63U;

     if (max_domain_id > dmar_uint->max_domain_id)
         max_domain_id = dmar_uint->max_domain_id;
@@ -456,7 +456,7 @@ static struct dmar_drhd_rt *device_to_dmaru(uint16_t segment, uint8_t bus,
         if (dmar_uint->drhd->segment != segment)
             continue;

-        for (i = 0; i < dmar_uint->drhd->dev_cnt; i++) {
+        for (i = 0U; i < dmar_uint->drhd->dev_cnt; i++) {
             if ((dmar_uint->drhd->devices[i].bus == bus) &&
                     (dmar_uint->drhd->devices[i].devfun == devfun))
                 return dmar_uint;
@@ -481,8 +481,8 @@ static uint8_t alloc_domain_id(void)
     /* domain id 0 is reserved, when CM = 1.
      * so domain id allocation start from 1
      */
-    for (i = 1; i < 64; i++) {
-        mask = (1 << i);
+    for (i = 1U; i < 64U; i++) {
+        mask = (1UL << i);
         if ((domain_bitmap & mask) == 0) {
             domain_bitmap |= mask;
             break;
@@ -509,8 +509,8 @@ static struct iommu_domain *create_host_domain(void)
     domain->is_host = true;
     domain->dom_id = alloc_domain_id();
     /* dmar uint need to support translation passthrough */
-    domain->trans_table_ptr = 0;
-    domain->addr_width = 48;
+    domain->trans_table_ptr = 0UL;
+    domain->addr_width = 48U;

     return domain;
 }
@@ -846,7 +846,7 @@ struct iommu_domain *create_iommu_domain(int vm_id, uint64_t translation_table,

     /* TODO: check if a domain with the vm_id exists */

-    if (translation_table == 0) {
+    if (translation_table == 0UL) {
         pr_err("translation table is NULL");
         return NULL;
     }
@@ -857,7 +857,7 @@ struct iommu_domain *create_iommu_domain(int vm_id, uint64_t translation_table,
         return NULL;
     }

-    domain = calloc(1, sizeof(struct iommu_domain));
+    domain = calloc(1U, sizeof(struct iommu_domain));

     ASSERT(domain != NULL, "");
     domain->is_host = false;
@@ -910,8 +910,8 @@ static int add_iommu_device(struct iommu_domain *domain, uint16_t segment,
     uint64_t *context_table;
     struct dmar_root_entry *root_entry;
     struct dmar_context_entry *context_entry;
-    uint64_t upper = 0;
-    uint64_t lower = 0;
+    uint64_t upper = 0UL;
+    uint64_t lower = 0UL;

     if (domain == NULL)
         return 1;
@@ -935,7 +935,7 @@ static int add_iommu_device(struct iommu_domain *domain, uint16_t segment,
         return 1;
     }

-    if (dmar_uint->root_table_addr == 0) {
+    if (dmar_uint->root_table_addr == 0UL) {
         void *root_table_vaddr = alloc_paging_struct();

         if (root_table_vaddr != NULL) {
@@ -964,7 +964,7 @@ static int add_iommu_device(struct iommu_domain *domain, uint16_t segment,
         lower = DMAR_SET_BITSLICE(lower,
             ROOT_ENTRY_LOWER_PRESENT, 1);

-        root_entry->upper = 0;
+        root_entry->upper = 0UL;
         root_entry->lower = lower;
         iommu_flush_cache(dmar_uint, root_entry,
             sizeof(struct dmar_root_entry));
@@ -992,8 +992,8 @@ static int add_iommu_device(struct iommu_domain *domain, uint16_t segment,
     }

     /* setup context entry for the devfun */
-    upper = 0;
-    lower = 0;
+    upper = 0UL;
+    lower = 0UL;
     if (domain->is_host) {
         if (iommu_ecap_pt(dmar_uint->ecap) != 0U) {
             /* When the Translation-type (T) field indicates
@@ -1069,8 +1069,8 @@ remove_iommu_device(struct iommu_domain *domain, uint16_t segment,
     }

     /* clear the present bit first */
-    context_entry->lower = 0;
-    context_entry->upper = 0;
+    context_entry->lower = 0UL;
+    context_entry->upper = 0UL;
     iommu_flush_cache(dmar_uint, context_entry,
         sizeof(struct dmar_context_entry));
@@ -1143,7 +1143,7 @@ void suspend_iommu(void)
 {
     struct dmar_drhd_rt *dmar_unit;
     struct list_head *pos;
-    uint32_t i, iommu_idx = 0;
+    uint32_t i, iommu_idx = 0U;

     list_for_each(pos, &dmar_drhd_units) {
         dmar_unit = list_entry(pos, struct dmar_drhd_rt, list);
@@ -1157,7 +1157,7 @@ void suspend_iommu(void)
         dmar_invalid_iotlb_global(dmar_unit);

         /* save IOMMU fault register state */
-        for (i = 0; i < IOMMU_FAULT_REGISTER_STATE_NUM; i++)
+        for (i = 0U; i < IOMMU_FAULT_REGISTER_STATE_NUM; i++)
             iommu_fault_state[iommu_idx][i] =
                 iommu_read32(dmar_unit, DMAR_FECTL_REG +
                     i * IOMMU_FAULT_REGISTER_STATE_NUM);
@@ -1178,7 +1178,7 @@ void resume_iommu(void)
 {
     struct dmar_drhd_rt *dmar_unit;
     struct list_head *pos;
-    uint32_t i, iommu_idx = 0;
+    uint32_t i, iommu_idx = 0U;

     /* restore IOMMU fault register state */
     list_for_each(pos, &dmar_drhd_units) {
@@ -1196,7 +1196,7 @@ void resume_iommu(void)
         dmar_invalid_iotlb_global(dmar_unit);

         /* restore IOMMU fault register state */
-        for (i = 0; i < IOMMU_FAULT_REGISTER_STATE_NUM; i++)
+        for (i = 0U; i < IOMMU_FAULT_REGISTER_STATE_NUM; i++)
             iommu_write32(dmar_unit, DMAR_FECTL_REG +
                 i * IOMMU_FAULT_REGISTER_STATE_NUM,
                 iommu_fault_state[iommu_idx][i]);
@@ -1228,8 +1228,8 @@ int init_iommu(void)

     host_domain = create_host_domain();

-    for (bus = 0; bus <= IOMMU_INIT_BUS_LIMIT; bus++) {
-        for (devfun = 0; devfun <= 255; devfun++) {
+    for (bus = 0U; bus <= IOMMU_INIT_BUS_LIMIT; bus++) {
+        for (devfun = 0U; devfun <= 255U; devfun++) {
             add_iommu_device(host_domain, 0,
                 (uint8_t)bus, (uint8_t)devfun);
         }
@@ -117,11 +117,11 @@ biosacpi_search_rsdp(char *base, int length)
         if (strncmp(rsdp->signature, ACPI_SIG_RSDP,
                 strnlen_s(ACPI_SIG_RSDP, 8)) == 0) {
             cp = (uint8_t *)rsdp;
-            sum = 0;
+            sum = NULL;
             for (idx = 0; idx < RSDP_CHECKSUM_LENGTH; idx++)
                 sum += *(cp + idx);

-            if (sum != 0)
+            if (sum != NULL)
                 continue;

             return rsdp;
@@ -254,9 +254,9 @@ static void uart16550_read(struct tgt_uart *tgt_uart, void *buffer,
             uart16550_read_reg(tgt_uart->base_address, RBR_IDX);

         /* Read 1 byte */
-        *bytes_read = 1;
+        *bytes_read = 1U;
     } else {
-        *bytes_read = 0;
+        *bytes_read = 0U;
     }
 }
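Several of the hunks above also switch 64-bit values to the UL suffix (0UL, 1UL). On an LP64 target, that is what keeps shift-based masks such as "(1UL << phys_cpu_num) - 1" in 64-bit arithmetic; with a plain 1 the shift would be performed on a 32-bit signed int, where a shift count of 31 overflows and one of 32 or more exceeds the type width, both undefined. A small standalone sketch (the CPU count is a made-up value, not from this commit):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t phys_cpu_num = 40U; /* hypothetical count > 31 */

        /* 1UL is 64 bits wide on LP64, so the "all CPUs active"
         * bitmap mask is computed correctly even past bit 31.
         */
        uint64_t active_mask = (1UL << phys_cpu_num) - 1UL;
        printf("active_mask = 0x%016llx\n",
               (unsigned long long)active_mask);
        return 0;
    }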