Mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2025-06-19 12:12:16 +00:00)
hv: fix MISRA-C issues related to for loop
This patch fixes the following issues:
- Assignment operation in expression.
- For loop incrementation is not simple.
- No brackets to loop body.
- Use of comma operator.

v1 -> v2:
* Replace &x->y with &(x->y) based on our new coding rule.

Tracked-On: #861
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
Reviewed-by: Junjie Mao <junjie.mao@intel.com>
This commit is contained in:
parent 852f613fe3
commit 10c64a5fca
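To make the rule violations concrete before reading the diff, here is a minimal, self-contained C sketch. It is not taken from this patch; identifiers such as lowest_bit, INVALID_INDEX, and regs are illustrative stand-ins for the ACRN code. Each comment pairs a non-compliant pattern with the compliant rewrite the hunks below apply.

/* misra_loop_sketch.c - illustrative only, not ACRN code */
#include <stdint.h>
#include <stdio.h>

#define INVALID_INDEX   UINT64_MAX

/* Stand-in for ffs64(): index of the lowest set bit, or INVALID_INDEX. */
static uint64_t lowest_bit(uint64_t mask)
{
        uint64_t i;

        for (i = 0UL; i < 64UL; i++) {
                if ((mask & (1ULL << i)) != 0UL) {
                        return i;
                }
        }
        return INVALID_INDEX;
}

int main(void)
{
        uint64_t mask = 0x14UL;         /* bits 2 and 4 set */
        uint64_t regs[4];
        uint64_t idx;
        uint64_t i;

        /* Comma operator (non-compliant): x = 1, y = 2;
         * Compliant: one statement, one semicolon, per assignment. */
        regs[0] = 1UL;
        regs[1] = 2UL;

        /* Assignment inside a controlling expression (non-compliant):
         *      while ((idx = lowest_bit(mask)) != INVALID_INDEX) { ... }
         * Also covers a for loop whose increment is not a simple counter
         * update. Compliant: convert to a while loop, hoist the first
         * assignment before the loop, and repeat the update at the end of
         * the body, as the smp_call_function() hunks below do. */
        idx = lowest_bit(mask);
        while (idx != INVALID_INDEX) {
                mask &= ~(1ULL << idx);
                printf("handled bit %llu\n", (unsigned long long)idx);
                idx = lowest_bit(mask);
        }

        /* Loop body without brackets (non-compliant):
         *      for (i = 0UL; i < 4UL; i++)
         *              regs[i] = 0UL;
         * Compliant: always brace the body, even for a single statement. */
        for (i = 0UL; i < 4UL; i++) {
                regs[i] = 0UL;
        }

        /* New coding rule from v2: parenthesize the operand of the address
         * operator, e.g. &(ectx->cs) instead of &ectx->cs. */
        return (int)regs[0];
}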
@@ -202,8 +202,8 @@ register_gas_io_handler(struct vm *vm, struct acpi_generic_address *gas)
                 return;
         }
 
-        gas_io.flags = IO_ATTR_RW,
-        gas_io.base = (uint16_t)gas->address,
+        gas_io.flags = IO_ATTR_RW;
+        gas_io.base = (uint16_t)gas->address;
         gas_io.len = io_len[gas->access_size];
 
         register_io_emulation_handler(vm, &gas_io,
@@ -35,7 +35,8 @@ void smp_call_function(uint64_t mask, smp_call_func_t func, void *data)
 
         /* wait for previous smp call complete, which may run on other cpus */
         while (atomic_cmpxchg64(&smp_call_mask, 0UL, mask & INVALID_BIT_INDEX));
-        while ((pcpu_id = ffs64(mask)) != INVALID_BIT_INDEX) {
+        pcpu_id = ffs64(mask);
+        while (pcpu_id != INVALID_BIT_INDEX) {
                 bitmap_clear_nolock(pcpu_id, &mask);
                 if (bitmap_test(pcpu_id, &pcpu_active_bitmap)) {
                         smp_call = &per_cpu(smp_call_info, pcpu_id);
@@ -46,6 +47,7 @@ void smp_call_function(uint64_t mask, smp_call_func_t func, void *data)
                         pr_err("pcpu_id %d not in active!", pcpu_id);
                         bitmap_clear_nolock(pcpu_id, &smp_call_mask);
                 }
+                pcpu_id = ffs64(mask);
         }
         send_dest_ipi(smp_call_mask, VECTOR_NOTIFY_VCPU,
                 INTR_LAPIC_ICR_LOGICAL);
@@ -264,7 +264,7 @@ int mmu_modify_or_del(uint64_t *pml4_page,
         dev_dbg(ACRN_DBG_MMU, "%s, vaddr: 0x%llx, size: 0x%llx\n",
                 __func__, vaddr, size);
         vaddr_end = vaddr + size;
-        for (; vaddr < vaddr_end; vaddr = vaddr_next) {
+        while (vaddr < vaddr_end) {
                 vaddr_next = (vaddr & PML4E_MASK) + PML4E_SIZE;
                 pml4e = pml4e_offset(pml4_page, vaddr);
                 if (pgentry_present(ptt, *pml4e) == 0UL) {
@@ -276,6 +276,8 @@ int mmu_modify_or_del(uint64_t *pml4_page,
                 if (ret != 0) {
                         return ret;
                 }
+
+                vaddr = vaddr_next;
         }
 
         return 0;
@@ -434,7 +436,7 @@ int mmu_add(uint64_t *pml4_page, uint64_t paddr_base,
         paddr = ROUND_PAGE_UP(paddr_base);
         vaddr_end = vaddr + ROUND_PAGE_DOWN(size);
 
-        for (; vaddr < vaddr_end; vaddr = vaddr_next) {
+        while (vaddr < vaddr_end) {
                 vaddr_next = (vaddr & PML4E_MASK) + PML4E_SIZE;
                 pml4e = pml4e_offset(pml4_page, vaddr);
                 if (pgentry_present(ptt, *pml4e) == 0UL) {
@@ -449,6 +451,7 @@ int mmu_add(uint64_t *pml4_page, uint64_t paddr_base,
                 }
 
                 paddr += (vaddr_next - vaddr);
+                vaddr = vaddr_next;
         }
 
         return 0;
@@ -547,9 +547,10 @@ static void init_guest_context_real(struct vcpu *vcpu)
 {
         struct ext_context *ectx =
                 &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ext_ctx;
+        struct segment_sel *seg;
 
         /* cs, ss, ds, es, fs, gs; cs will be override later. */
-        for(struct segment_sel *seg = &ectx->cs; seg <= &ectx->gs; seg++) {
+        for (seg = &(ectx->cs); seg <= &(ectx->gs); seg++) {
                 seg->selector = 0U;
                 seg->base = 0UL;
                 seg->limit = 0xFFFFU;
@@ -606,15 +607,15 @@ static void init_guest_context_vm0_bsp(struct vcpu *vcpu)
         struct ext_context *ectx =
                 &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ext_ctx;
         struct boot_ctx * init_ctx = (struct boot_ctx *)(&vm0_boot_context);
-        uint16_t *sel;
+        uint16_t *sel = &(init_ctx->cs_sel);
         struct segment_sel *seg;
 
-        for(seg = &ectx->cs, sel = &init_ctx->cs_sel;
-                seg <= &ectx->gs; seg ++, sel++) {
+        for (seg = &(ectx->cs); seg <= &(ectx->gs); seg++) {
                 seg->base = 0UL;
                 seg->limit = 0xFFFFFFFFU;
                 seg->attr = PROTECTED_MODE_DATA_SEG_AR;
                 seg->selector = *sel;
+                sel++;
         }
         ectx->cs.attr = init_ctx->cs_ar; /* override cs attr */
 
@@ -644,7 +645,7 @@ static void init_guest_context_protect(struct vcpu *vcpu)
         struct segment_sel *seg;
 
         ectx->gdtr.base = create_guest_init_gdt(vcpu->vm, &ectx->gdtr.limit);
-        for(seg = &ectx->cs; seg <= &ectx->gs; seg ++) {
+        for (seg = &(ectx->cs); seg <= &(ectx->gs); seg++) {
                 seg->base = 0UL;
                 seg->limit = 0xFFFFFFFFU;
                 seg->attr = PROTECTED_MODE_DATA_SEG_AR;
@@ -721,7 +722,7 @@ static void init_guest_state(struct vcpu *vcpu)
         ctx->ext_ctx.tr.limit = 0xFFFFU;
         ctx->ext_ctx.tr.attr = TR_AR;
 
-        if(vcpu_mode == CPU_MODE_REAL) {
+        if (vcpu_mode == CPU_MODE_REAL) {
                 init_guest_context_real(vcpu);
                 init_guest_vmx(vcpu, CR0_ET | CR0_NE, 0, 0);
         } else if (is_vm0(vcpu->vm) && is_vcpu_bsp(vcpu)) {
@@ -184,8 +184,9 @@ static void update_trampoline_code_refs(uint64_t dest_pa)
         *(uint64_t *)(ptr) += dest_pa;
 
         ptr = HPA2HVA(dest_pa + trampoline_relo_addr(&trampoline_pdpt_addr));
-        for (i = 0; i < 4; i++)
+        for (i = 0; i < 4; i++) {
                 *(uint64_t *)(ptr + sizeof(uint64_t) * i) += dest_pa;
+        }
 
         /* update the gdt base pointer with relocated offset */
         ptr = HPA2HVA(dest_pa + trampoline_relo_addr(&trampoline_gdt_ptr));
@@ -96,8 +96,9 @@ int load_guest(struct vm *vm, struct vcpu *vcpu)
         lowmem_gpa_top = *(uint64_t *)hva;
 
         /* hardcode vcpu entry addr(kernel entry) & rsi (zeropage)*/
-        for (i = 0; i < NUM_GPRS; i++)
+        for (i = 0; i < NUM_GPRS; i++) {
                 vcpu_set_gpreg(vcpu, i, 0UL);
+        }
 
         hva = GPA2HVA(vm, lowmem_gpa_top -
                 MEM_4K - MEM_2K);
@@ -169,8 +170,9 @@ int general_sw_loader(struct vm *vm, struct vcpu *vcpu)
         /* Documentation states: ebx=0, edi=0, ebp=0, esi=ptr to
          * zeropage
          */
-        for (i = 0; i < NUM_GPRS; i++)
+        for (i = 0; i < NUM_GPRS; i++) {
                 vcpu_set_gpreg(vcpu, i, 0UL);
+        }
 
         /* Get host-physical address for guest bootargs */
         hva = GPA2HVA(vm,