From 2f2d108b1e787bec6af3668712b3a3b0e5a4125f Mon Sep 17 00:00:00 2001
From: Minggui Cao
Date: Thu, 12 Jul 2018 11:47:49 +0800
Subject: [PATCH] HV: handle integral issues reported by MISRA-C

The main focus is on: constant suffixes U/UL; parameter casts such as
assigning a uint32_t value to a uint16_t variable; and unifying some API
interfaces to be consistent with their callers. Some places are also
modified to unify code style.

Signed-off-by: Minggui Cao
---
 hypervisor/arch/x86/cpu.c | 2 +-
 hypervisor/arch/x86/guest/ucode.c | 12 ++---
 hypervisor/arch/x86/guest/vm.c | 11 ++---
 hypervisor/arch/x86/guest/vmcall.c | 2 +-
 hypervisor/arch/x86/ioapic.c | 8 ++--
 hypervisor/arch/x86/irq.c | 8 ++--
 hypervisor/arch/x86/lapic.c | 57 +++++++++++------------
 hypervisor/arch/x86/softirq.c | 2 +-
 hypervisor/arch/x86/virq.c | 38 +++++++--------
 hypervisor/include/arch/x86/guest/guest.h | 27 ++++-------
 hypervisor/include/arch/x86/guest/vcpu.h | 9 +++-
 hypervisor/include/arch/x86/irq.h | 2 +-
 hypervisor/include/arch/x86/lapic.h | 47 +++++++++----------
 hypervisor/include/arch/x86/per_cpu.h | 2 +-
 hypervisor/include/arch/x86/softirq.h | 2 +-
 hypervisor/include/lib/spinlock.h | 4 +-
 16 files changed, 113 insertions(+), 120 deletions(-)

diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c
index 1e1c546cb..32a0afb9f 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -309,7 +309,7 @@ static void cpu_set_current_state(uint16_t pcpu_id, enum cpu_state state) } /* Set state for the specified CPU */ - per_cpu(state, pcpu_id) = state; + per_cpu(cpu_state, pcpu_id) = state; spinlock_release(&up_count_spinlock); }
diff --git a/hypervisor/arch/x86/guest/ucode.c b/hypervisor/arch/x86/guest/ucode.c
index 6d543d565..811b4719a 100644
--- a/hypervisor/arch/x86/guest/ucode.c
+++ b/hypervisor/arch/x86/guest/ucode.c
@@ -10,9 +10,9 @@ uint64_t get_microcode_version(void) { uint64_t val; - uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; + uint32_t eax = 0U, ebx = 0U, ecx = 0U, edx = 0U; - msr_write(MSR_IA32_BIOS_SIGN_ID, 0); + msr_write(MSR_IA32_BIOS_SIGN_ID, 0U); cpuid(CPUID_FEATURES, &eax, &ebx, &ecx, &edx); val = msr_read(MSR_IA32_BIOS_SIGN_ID);
@@ -24,14 +24,14 @@ uint64_t get_microcode_version(void) * header is zero, the ucode length is 2000 */ #define UCODE_GET_DATA_SIZE(uhdr) \ - ((uhdr.data_size != 0U) ? uhdr.data_size : 2000) + ((uhdr.data_size != 0U) ? uhdr.data_size : 2000U) void acrn_update_ucode(struct vcpu *vcpu, uint64_t v) { uint64_t gva; struct ucode_header uhdr; - int data_page_num; + uint32_t data_page_num; size_t data_size; - uint8_t *ucode_ptr, *ptr; + uint8_t *ucode_ptr; int err; uint32_t err_code;
@@ -48,7 +48,7 @@ void acrn_update_ucode(struct vcpu *vcpu, uint64_t v) data_size = UCODE_GET_DATA_SIZE(uhdr) + sizeof(struct ucode_header); data_page_num = - (data_size + CPU_PAGE_SIZE - 1) >> CPU_PAGE_SHIFT; + (data_size + CPU_PAGE_SIZE - 1U) >> CPU_PAGE_SHIFT; ucode_ptr = alloc_pages(data_page_num); if (ucode_ptr == NULL) {
diff --git a/hypervisor/arch/x86/guest/vm.c b/hypervisor/arch/x86/guest/vm.c
index f2173296c..a9757b944 100644
--- a/hypervisor/arch/x86/guest/vm.c
+++ b/hypervisor/arch/x86/guest/vm.c
@@ -23,7 +23,7 @@ spinlock_t vm_list_lock = { }; /* used for vmid allocation. 
And this means the max vm number is 64 */ -static unsigned long vmid_bitmap; +static uint64_t vmid_bitmap; static void init_vm(struct vm_description *vm_desc, struct vm *vm_handle) @@ -62,7 +62,7 @@ struct vm *get_vm_from_vmid(uint16_t vm_id) int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm) { - uint32_t id; + uint16_t id; struct vm *vm; int status; @@ -72,7 +72,7 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm) } /* Allocate memory for virtual machine */ - vm = calloc(1, sizeof(struct vm)); + vm = calloc(1U, sizeof(struct vm)); if (vm == NULL) { pr_err("%s, vm allocation failed\n", __func__); return -ENOMEM; @@ -83,7 +83,6 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm) */ init_vm(vm_desc, vm); - /* Init mmio list */ INIT_LIST_HEAD(&vm->mmio_list); @@ -92,7 +91,7 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm) } vm->hw.vcpu_array = - calloc(1, sizeof(struct vcpu *) * vm->hw.num_vcpus); + calloc(1U, sizeof(struct vcpu *) * vm->hw.num_vcpus); if (vm->hw.vcpu_array == NULL) { pr_err("%s, vcpu_array allocation failed\n", __func__); status = -ENOMEM; @@ -311,7 +310,7 @@ void resume_vm(struct vm *vm) */ void resume_vm_from_s3(struct vm *vm, uint32_t wakeup_vec) { - struct vcpu *bsp = vcpu_from_vid(vm, 0); + struct vcpu *bsp = vcpu_from_vid(vm, 0U); vm->state = VM_STARTED; diff --git a/hypervisor/arch/x86/guest/vmcall.c b/hypervisor/arch/x86/guest/vmcall.c index a7414bb95..bd06d4ff5 100644 --- a/hypervisor/arch/x86/guest/vmcall.c +++ b/hypervisor/arch/x86/guest/vmcall.c @@ -149,7 +149,7 @@ int vmcall_vmexit_handler(struct vcpu *vcpu) } out: - cur_context->guest_cpu_regs.regs.rax = ret; + cur_context->guest_cpu_regs.regs.rax = (uint64_t)ret; TRACE_2L(TRACE_VMEXIT_VMCALL, vm->attr.id, hypcall_id); diff --git a/hypervisor/arch/x86/ioapic.c b/hypervisor/arch/x86/ioapic.c index 147dd7cae..a758b458b 100644 --- a/hypervisor/arch/x86/ioapic.c +++ b/hypervisor/arch/x86/ioapic.c @@ -10,7 +10,7 @@ #define IOAPIC_REGSEL_OFFSET 0 #define IOAPIC_WINSWL_OFFSET 0x10 -#define IOAPIC_MAX_PIN 240U +#define IOAPIC_MAX_PIN 240U #define IOAPIC_INVALID_PIN 0xffU /* IOAPIC Redirection Table (RTE) Entry structure */ @@ -109,7 +109,7 @@ ioapic_read_reg32(const void *ioapic_base, const uint32_t offset) spinlock_irqsave_obtain(&ioapic_lock); /* Write IOREGSEL */ - mmio_write_long(offset, (void *)ioapic_base); + mmio_write_long(offset, (void *)ioapic_base + IOAPIC_REGSEL_OFFSET); /* Read IOWIN */ v = mmio_read_long((void *)ioapic_base + IOAPIC_WINSWL_OFFSET); @@ -126,7 +126,7 @@ ioapic_write_reg32(const void *ioapic_base, spinlock_irqsave_obtain(&ioapic_lock); /* Write IOREGSEL */ - mmio_write_long(offset, (void *)ioapic_base); + mmio_write_long(offset, (void *)ioapic_base + IOAPIC_REGSEL_OFFSET); /* Write IOWIN */ mmio_write_long(value, (void *)ioapic_base + IOAPIC_WINSWL_OFFSET); @@ -173,7 +173,7 @@ ioapic_set_rte_entry(void *ioapic_addr, static inline struct ioapic_rte create_rte_for_legacy_irq(uint32_t irq, uint32_t vr) { - struct ioapic_rte rte = {0, 0}; + struct ioapic_rte rte = {0U, 0U}; /* Legacy IRQ 0-15 setup, default masked * are actually defined in either MPTable or ACPI MADT table diff --git a/hypervisor/arch/x86/irq.c b/hypervisor/arch/x86/irq.c index f8ad6131b..93b25836a 100644 --- a/hypervisor/arch/x86/irq.c +++ b/hypervisor/arch/x86/irq.c @@ -156,8 +156,8 @@ static void _irq_desc_free_vector(uint32_t irq) static void disable_pic_irq(void) { - io_write_byte(0xff, 0xA1); - io_write_byte(0xff, 0x21); + io_write_byte(0xffU, 0xA1U); + 
io_write_byte(0xffU, 0x21U); } static bool @@ -250,7 +250,7 @@ common_register_handler(uint32_t irq, goto OUT; } - node = calloc(1, sizeof(struct dev_handler_node)); + node = calloc(1U, sizeof(struct dev_handler_node)); if (node == NULL) { pr_err("failed to alloc node"); irq_desc_try_free_vector(irq); @@ -285,7 +285,7 @@ OUT: /* we are okay using strcpy_s here even with spinlock * since no #PG in HV right now */ - (void)strcpy_s(node->name, 32, info->name); + (void)strcpy_s(node->name, 32U, info->name); dev_dbg(ACRN_DBG_IRQ, "[%s] %s irq%d vr:0x%x", __func__, node->name, irq, desc->vector); } diff --git a/hypervisor/arch/x86/lapic.c b/hypervisor/arch/x86/lapic.c index 45c6e295f..59f39984c 100644 --- a/hypervisor/arch/x86/lapic.c +++ b/hypervisor/arch/x86/lapic.c @@ -127,7 +127,7 @@ union lapic_base_msr { }; struct lapic_info { - int init_status; + bool init_done; struct { uint64_t paddr; void *vaddr; @@ -140,7 +140,7 @@ static union lapic_base_msr lapic_base_msr; static inline uint32_t read_lapic_reg32(uint32_t offset) { - if (offset < 0x20 || offset > 0x3ff) + if (offset < 0x20U || offset > 0x3ffU) return 0; return mmio_read_long(lapic_info.xapic.vaddr + offset); @@ -148,7 +148,7 @@ static inline uint32_t read_lapic_reg32(uint32_t offset) inline void write_lapic_reg32(uint32_t offset, uint32_t value) { - if (offset < 0x20 || offset > 0x3ff) + if (offset < 0x20U || offset > 0x3ffU) return; mmio_write_long(value, lapic_info.xapic.vaddr + offset); @@ -156,7 +156,7 @@ inline void write_lapic_reg32(uint32_t offset, uint32_t value) static void clear_lapic_isr(void) { - uint64_t isr_reg = LAPIC_IN_SERVICE_REGISTER_0; + uint32_t isr_reg = LAPIC_IN_SERVICE_REGISTER_0; /* This is a Intel recommended procedure and assures that the processor * does not get hung up due to already set "in-service" interrupts left @@ -165,10 +165,10 @@ static void clear_lapic_isr(void) */ do { if (read_lapic_reg32(isr_reg) != 0U) { - write_lapic_reg32(LAPIC_EOI_REGISTER, 0); + write_lapic_reg32(LAPIC_EOI_REGISTER, 0U); continue; } - isr_reg += 0x10; + isr_reg += 0x10U; } while (isr_reg <= LAPIC_IN_SERVICE_REGISTER_7); } @@ -186,25 +186,25 @@ int early_init_lapic(void) lapic_base_msr.value = msr_read(MSR_IA32_APIC_BASE); /* Initialize globals only 1 time */ - if (lapic_info.init_status == false) { + if (lapic_info.init_done == false) { /* Get Local APIC physical address. 
*/ lapic_info.xapic.paddr = LAPIC_BASE; /* Map in the local xAPIC */ map_lapic(); - lapic_info.init_status = true; + lapic_info.init_done = true; } /* Check if xAPIC mode enabled */ - if (lapic_base_msr.fields.xAPIC_enable == 0) { + if (lapic_base_msr.fields.xAPIC_enable == 0U) { /* Ensure in xAPIC mode */ - lapic_base_msr.fields.xAPIC_enable = 1; - lapic_base_msr.fields.x2APIC_enable = 0; + lapic_base_msr.fields.xAPIC_enable = 1U; + lapic_base_msr.fields.x2APIC_enable = 0U; msr_write(MSR_IA32_APIC_BASE, lapic_base_msr.value); } else { /* Check if x2apic is disabled */ - ASSERT(lapic_base_msr.fields.x2APIC_enable == 0, + ASSERT(lapic_base_msr.fields.x2APIC_enable == 0U, "Disable X2APIC in BIOS"); } @@ -218,7 +218,7 @@ int init_lapic(uint16_t pcpu_id) ((1U << pcpu_id) << 24U)); /* Set the Destination Format Register */ - write_lapic_reg32(LAPIC_DESTINATION_FORMAT_REGISTER, 0xf << 28); + write_lapic_reg32(LAPIC_DESTINATION_FORMAT_REGISTER, 0xfU << 28U); /* Mask all LAPIC LVT entries before enabling the local APIC */ write_lapic_reg32(LAPIC_LVT_CMCI_REGISTER, LAPIC_LVT_MASK); @@ -306,7 +306,7 @@ static void restore_lapic(struct lapic_regs *regs) void suspend_lapic(void) { - uint32_t val = 0; + uint32_t val; save_lapic(&saved_lapic_regs); @@ -326,7 +326,7 @@ void resume_lapic(void) void send_lapic_eoi(void) { - write_lapic_reg32(LAPIC_EOI_REGISTER, 0); + write_lapic_reg32(LAPIC_EOI_REGISTER, 0U); } static void wait_for_delivery(void) @@ -345,7 +345,7 @@ uint8_t get_cur_lapic_id(void) uint8_t lapic_id; lapic_id_reg = read_lapic_reg32(LAPIC_ID_REGISTER); - lapic_id = (lapic_id_reg >> 24U); + lapic_id = (uint8_t)(lapic_id_reg >> 24U); return lapic_id; } @@ -363,7 +363,7 @@ send_startup_ipi(enum intr_cpu_startup_shorthand cpu_startup_shorthand, ASSERT(status == 0, "Incorrect arguments"); - icr.value = 0; + icr.value = 0U; icr.bits.destination_mode = INTR_LAPIC_ICR_PHYSICAL; if (cpu_startup_shorthand == INTR_CPU_STARTUP_USE_DEST) { @@ -371,7 +371,7 @@ send_startup_ipi(enum intr_cpu_startup_shorthand cpu_startup_shorthand, icr.x_bits.dest_field = per_cpu(lapic_id, dest_pcpu_id); } else { /* Use destination shorthand */ shorthand = INTR_LAPIC_ICR_ALL_EX_SELF; - icr.value_32.hi_32 = 0; + icr.value_32.hi_32 = 0U; } /* Assert INIT IPI */ @@ -386,8 +386,8 @@ send_startup_ipi(enum intr_cpu_startup_shorthand cpu_startup_shorthand, /* Give 10ms for INIT sequence to complete for old processors. * Modern processors (family == 6) don't need to wait here. 
*/ - if (boot_cpu_data.family != 6) - mdelay(10); + if (boot_cpu_data.family != 6U) + mdelay(10U); /* De-assert INIT IPI */ write_lapic_reg32(LAPIC_INT_COMMAND_REGISTER_1, icr.value_32.hi_32); @@ -397,17 +397,17 @@ send_startup_ipi(enum intr_cpu_startup_shorthand cpu_startup_shorthand, /* Send Start IPI with page number of secondary reset code */ write_lapic_reg32(LAPIC_INT_COMMAND_REGISTER_1, icr.value_32.hi_32); - icr.value_32.lo_32 = 0; + icr.value_32.lo_32 = 0U; icr.bits.shorthand = shorthand; icr.bits.delivery_mode = INTR_LAPIC_ICR_STARTUP; - icr.bits.vector = ((uint64_t) cpu_startup_start_address) >> 12; + icr.bits.vector = cpu_startup_start_address >> 12U; write_lapic_reg32(LAPIC_INT_COMMAND_REGISTER_0, icr.value_32.lo_32); wait_for_delivery(); - if (boot_cpu_data.family == 6) /* 10us is enough for Modern processors */ - udelay(10); - else /* 200us for old processors */ - udelay(200); + if (boot_cpu_data.family == 6U) + udelay(10U); /* 10us is enough for Modern processors */ + else + udelay(200U); /* 200us for old processors */ /* Send another start IPI as per the Intel Arch specification */ write_lapic_reg32(LAPIC_INT_COMMAND_REGISTER_1, icr.value_32.hi_32); @@ -440,8 +440,7 @@ void send_single_ipi(uint16_t pcpu_id, uint32_t vector) } int send_shorthand_ipi(uint8_t vector, - enum intr_lapic_icr_shorthand shorthand, - enum intr_lapic_icr_delivery_mode delivery_mode) + uint8_t shorthand, uint8_t delivery_mode) { union apic_icr icr; int status = 0; @@ -453,7 +452,7 @@ int send_shorthand_ipi(uint8_t vector, ASSERT(status == 0, "Incorrect arguments"); - icr.value = 0; + icr.value = 0U; icr.bits.shorthand = shorthand; icr.bits.delivery_mode = delivery_mode; icr.bits.vector = vector; diff --git a/hypervisor/arch/x86/softirq.c b/hypervisor/arch/x86/softirq.c index 90d317eed..822948178 100644 --- a/hypervisor/arch/x86/softirq.c +++ b/hypervisor/arch/x86/softirq.c @@ -26,7 +26,7 @@ void init_softirq(void) } } -void raise_softirq(int softirq_id) +void raise_softirq(uint16_t softirq_id) { uint16_t cpu_id = get_cpu_id(); uint64_t *bitmap = &per_cpu(softirq_pending, cpu_id); diff --git a/hypervisor/arch/x86/virq.c b/hypervisor/arch/x86/virq.c index 6ea82b26b..b146f9b88 100644 --- a/hypervisor/arch/x86/virq.c +++ b/hypervisor/arch/x86/virq.c @@ -9,7 +9,7 @@ #define EXCEPTION_ERROR_CODE_VALID 8U #define INTERRPUT_QUEUE_BUFF_SIZE 255 -#define ACRN_DBG_INTR 6 +#define ACRN_DBG_INTR 6U #define EXCEPTION_CLASS_BENIGN 1 #define EXCEPTION_CLASS_CONT 2 @@ -54,19 +54,19 @@ static int is_guest_irq_enabled(struct vcpu *vcpu) { struct run_context *cur_context = &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context]; - uint32_t guest_rflags, guest_state; + uint64_t guest_rflags, guest_state; int status = false; /* Read the RFLAGS of the guest */ guest_rflags = cur_context->rflags; /* Check the RFLAGS[IF] bit first */ - if ((guest_rflags & HV_ARCH_VCPU_RFLAGS_IF) != 0U) { + if ((guest_rflags & HV_ARCH_VCPU_RFLAGS_IF) != 0UL) { /* Interrupts are allowed */ /* Check for temporarily disabled interrupts */ guest_state = exec_vmread(VMX_GUEST_INTERRUPTIBILITY_INFO); if ((guest_state & (HV_ARCH_VCPU_BLOCKED_BY_STI | - HV_ARCH_VCPU_BLOCKED_BY_MOVSS)) == 0) { + HV_ARCH_VCPU_BLOCKED_BY_MOVSS)) == 0UL) { status = true; } } @@ -76,7 +76,7 @@ static int is_guest_irq_enabled(struct vcpu *vcpu) static bool vcpu_pending_request(struct vcpu *vcpu) { struct vlapic *vlapic; - uint32_t vector = 0; + uint32_t vector = 0U; int ret = 0; /* Query vLapic to get vector to inject */ @@ -91,10 +91,10 @@ static bool 
vcpu_pending_request(struct vcpu *vcpu) vcpu_make_request(vcpu, ACRN_REQUEST_EVENT); } - return vcpu->arch_vcpu.pending_req != 0; + return vcpu->arch_vcpu.pending_req != 0UL; } -void vcpu_make_request(struct vcpu *vcpu, int eventid) +void vcpu_make_request(struct vcpu *vcpu, uint16_t eventid) { bitmap_set(eventid, &vcpu->arch_vcpu.pending_req); /* @@ -113,7 +113,7 @@ void vcpu_make_request(struct vcpu *vcpu, int eventid) static int vcpu_do_pending_event(struct vcpu *vcpu) { struct vlapic *vlapic = vcpu->arch_vcpu.vlapic; - uint32_t vector = 0; + uint32_t vector = 0U; int ret = 0; if (is_vapic_intr_delivery_supported()) { @@ -133,7 +133,7 @@ static int vcpu_do_pending_event(struct vcpu *vcpu) if (ret == 0) return -1; - if (!(vector >= 16 && vector <= 255)) { + if (!(vector >= 16U && vector <= 255U)) { dev_dbg(ACRN_DBG_INTR, "invalid vector %d from local APIC", vector); return -1; @@ -205,12 +205,12 @@ int vcpu_queue_exception(struct vcpu *vcpu, uint32_t vector, uint32_t err_code) { /* VECTOR_INVALID is also greater than 32 */ - if (vector >= 32) { + if (vector >= 32U) { pr_err("invalid exception vector %d", vector); return -EINVAL; } - int32_t prev_vector = + uint32_t prev_vector = vcpu->arch_vcpu.exception_info.exception; int32_t new_class, prev_class; @@ -229,7 +229,7 @@ int vcpu_queue_exception(struct vcpu *vcpu, uint32_t vector, new_class != EXCEPTION_CLASS_BENIGN)) { /* generate double fault */ vector = IDT_DF; - err_code = 0; + err_code = 0U; } vcpu->arch_vcpu.exception_info.exception = vector; @@ -308,7 +308,7 @@ void vcpu_inject_pf(struct vcpu *vcpu, uint64_t addr, uint32_t err_code) int interrupt_window_vmexit_handler(struct vcpu *vcpu) { - int value32; + uint32_t value32; TRACE_2L(TRACE_VMEXIT_INTERRUPT_WINDOW, 0UL, 0UL); @@ -323,7 +323,7 @@ int interrupt_window_vmexit_handler(struct vcpu *vcpu) /* No interrupts to inject. 
* Disable the interrupt window exiting */ - vcpu->arch_vcpu.irq_window_enabled = 0; + vcpu->arch_vcpu.irq_window_enabled = 0U; value32 = exec_vmread(VMX_PROC_VM_EXEC_CONTROLS); value32 &= ~(VMX_PROCBASED_CTLS_IRQ_WIN); exec_vmwrite(VMX_PROC_VM_EXEC_CONTROLS, value32); @@ -361,7 +361,7 @@ int external_interrupt_vmexit_handler(struct vcpu *vcpu) int acrn_handle_pending_request(struct vcpu *vcpu) { int ret = 0; - int tmp; + uint64_t tmp; bool intr_pending = false; uint64_t *pending_req_bits = &vcpu->arch_vcpu.pending_req; @@ -448,8 +448,8 @@ INTR_WIN: intr_pending = vcpu_pending_request(vcpu); /* Enable interrupt window exiting if pending */ - if (intr_pending && vcpu->arch_vcpu.irq_window_enabled == 0) { - vcpu->arch_vcpu.irq_window_enabled = 1; + if (intr_pending && vcpu->arch_vcpu.irq_window_enabled == 0U) { + vcpu->arch_vcpu.irq_window_enabled = 1U; tmp = exec_vmread(VMX_PROC_VM_EXEC_CONTROLS); tmp |= (VMX_PROCBASED_CTLS_IRQ_WIN); exec_vmwrite(VMX_PROC_VM_EXEC_CONTROLS, tmp); @@ -478,13 +478,13 @@ void cancel_event_injection(struct vcpu *vcpu) exec_vmread(VMX_ENTRY_EXCEPTION_ERROR_CODE); vcpu->arch_vcpu.inject_info.intr_info = intinfo; - exec_vmwrite(VMX_ENTRY_INT_INFO_FIELD, 0); + exec_vmwrite(VMX_ENTRY_INT_INFO_FIELD, 0UL); } } int exception_vmexit_handler(struct vcpu *vcpu) { - uint32_t intinfo, int_err_code = 0; + uint32_t intinfo, int_err_code = 0U; uint32_t exception_vector = VECTOR_INVALID; uint32_t cpl; int status = 0; diff --git a/hypervisor/include/arch/x86/guest/guest.h b/hypervisor/include/arch/x86/guest/guest.h index 036d9e55b..32070beeb 100644 --- a/hypervisor/include/arch/x86/guest/guest.h +++ b/hypervisor/include/arch/x86/guest/guest.h @@ -11,8 +11,8 @@ #define VM_RESUME 0 #define VM_LAUNCH 1 -#define ACRN_DBG_PTIRQ 6 -#define ACRN_DBG_IRQ 6 +#define ACRN_DBG_PTIRQ 6U +#define ACRN_DBG_IRQ 6U #ifndef ASSEMBLER @@ -39,14 +39,14 @@ int32_t acrn_insert_request_wait(struct vcpu *vcpu, struct vhm_request *req); /* * VCPU related APIs */ -#define ACRN_REQUEST_EXCP 0 -#define ACRN_REQUEST_EVENT 1 -#define ACRN_REQUEST_EXTINT 2 -#define ACRN_REQUEST_NMI 3 -#define ACRN_REQUEST_TMR_UPDATE 4 -#define ACRN_REQUEST_EPT_FLUSH 5 -#define ACRN_REQUEST_TRP_FAULT 6 -#define ACRN_REQUEST_VPID_FLUSH 7 /* flush vpid tlb */ +#define ACRN_REQUEST_EXCP 0U +#define ACRN_REQUEST_EVENT 1U +#define ACRN_REQUEST_EXTINT 2U +#define ACRN_REQUEST_NMI 3U +#define ACRN_REQUEST_TMR_UPDATE 4U +#define ACRN_REQUEST_EPT_FLUSH 5U +#define ACRN_REQUEST_TRP_FAULT 6U +#define ACRN_REQUEST_VPID_FLUSH 7U /* flush vpid tlb */ #define E820_MAX_ENTRIES 32U @@ -69,13 +69,6 @@ struct vm_lu_mem_map { uint64_t size; /* Size of map */ }; -enum vm_cpu_mode { - CPU_MODE_REAL, - CPU_MODE_PROTECTED, - CPU_MODE_COMPATIBILITY, /* IA-32E mode (CS.L = 0) */ - CPU_MODE_64BIT, /* IA-32E mode (CS.L = 1) */ -}; - /* Use # of paging level to identify paging mode */ enum vm_paging_mode { PAGING_MODE_0_LEVEL = 0, /* Flat */ diff --git a/hypervisor/include/arch/x86/guest/vcpu.h b/hypervisor/include/arch/x86/guest/vcpu.h index 371b2b0e1..d7a0a02a0 100644 --- a/hypervisor/include/arch/x86/guest/vcpu.h +++ b/hypervisor/include/arch/x86/guest/vcpu.h @@ -87,6 +87,13 @@ enum vcpu_state { VCPU_UNKNOWN_STATE, }; +enum vm_cpu_mode { + CPU_MODE_REAL, + CPU_MODE_PROTECTED, + CPU_MODE_COMPATIBILITY, /* IA-32E mode (CS.L = 0) */ + CPU_MODE_64BIT, /* IA-32E mode (CS.L = 1) */ +}; + struct cpu_regs { uint64_t rax; uint64_t rbx; @@ -202,7 +209,7 @@ struct vcpu_arch { uint32_t exception; /* The error number for the exception. 
*/ - int error; + uint32_t error; } exception_info; uint8_t lapic_mask; diff --git a/hypervisor/include/arch/x86/irq.h b/hypervisor/include/arch/x86/irq.h index f23bd80ee..09e6e7742 100644 --- a/hypervisor/include/arch/x86/irq.h +++ b/hypervisor/include/arch/x86/irq.h @@ -99,7 +99,7 @@ void vcpu_inject_extint(struct vcpu *vcpu); void vcpu_inject_nmi(struct vcpu *vcpu); void vcpu_inject_gp(struct vcpu *vcpu, uint32_t err_code); void vcpu_inject_pf(struct vcpu *vcpu, uint64_t addr, uint32_t err_code); -void vcpu_make_request(struct vcpu *vcpu, int eventid); +void vcpu_make_request(struct vcpu *vcpu, uint16_t eventid); int vcpu_queue_exception(struct vcpu *vcpu, uint32_t vector, uint32_t err_code); int exception_vmexit_handler(struct vcpu *vcpu); diff --git a/hypervisor/include/arch/x86/lapic.h b/hypervisor/include/arch/x86/lapic.h index 7584300d3..3ecbfffc1 100644 --- a/hypervisor/include/arch/x86/lapic.h +++ b/hypervisor/include/arch/x86/lapic.h @@ -9,36 +9,31 @@ #define DEBUG_LAPIC 0 -enum intr_lapic_icr_delivery_mode { - INTR_LAPIC_ICR_FIXED = 0x0, - INTR_LAPIC_ICR_LP = 0x1, - INTR_LAPIC_ICR_SMI = 0x2, - INTR_LAPIC_ICR_NMI = 0x4, - INTR_LAPIC_ICR_INIT = 0x5, - INTR_LAPIC_ICR_STARTUP = 0x6, -}; +/* intr_lapic_icr_delivery_mode */ +#define INTR_LAPIC_ICR_FIXED 0x0U +#define INTR_LAPIC_ICR_LP 0x1U +#define INTR_LAPIC_ICR_SMI 0x2U +#define INTR_LAPIC_ICR_NMI 0x4U +#define INTR_LAPIC_ICR_INIT 0x5U +#define INTR_LAPIC_ICR_STARTUP 0x6U -enum intr_lapic_icr_dest_mode { - INTR_LAPIC_ICR_PHYSICAL = 0x0, - INTR_LAPIC_ICR_LOGICAL = 0x1 -}; +/* intr_lapic_icr_dest_mode */ +#define INTR_LAPIC_ICR_PHYSICAL 0x0U +#define INTR_LAPIC_ICR_LOGICAL 0x1U -enum intr_lapic_icr_level { - INTR_LAPIC_ICR_DEASSERT = 0x0, - INTR_LAPIC_ICR_ASSERT = 0x1, -}; +/* intr_lapic_icr_level */ +#define INTR_LAPIC_ICR_DEASSERT 0x0U +#define INTR_LAPIC_ICR_ASSERT 0x1U -enum intr_lapic_icr_trigger { - INTR_LAPIC_ICR_EDGE = 0x0, - INTR_LAPIC_ICR_LEVEL = 0x1, -}; +/* intr_lapic_icr_trigger */ +#define INTR_LAPIC_ICR_EDGE 0x0U +#define INTR_LAPIC_ICR_LEVEL 0x1U -enum intr_lapic_icr_shorthand { - INTR_LAPIC_ICR_USE_DEST_ARRAY = 0x0, - INTR_LAPIC_ICR_SELF = 0x1, - INTR_LAPIC_ICR_ALL_INC_SELF = 0x2, - INTR_LAPIC_ICR_ALL_EX_SELF = 0x3, -}; +/* intr_lapic_icr_shorthand */ +#define INTR_LAPIC_ICR_USE_DEST_ARRAY 0x0U +#define INTR_LAPIC_ICR_SELF 0x1U +#define INTR_LAPIC_ICR_ALL_INC_SELF 0x2U +#define INTR_LAPIC_ICR_ALL_EX_SELF 0x3U /* Default LAPIC base */ #define LAPIC_BASE 0xFEE00000U diff --git a/hypervisor/include/arch/x86/per_cpu.h b/hypervisor/include/arch/x86/per_cpu.h index f06162e2a..7621ac56f 100644 --- a/hypervisor/include/arch/x86/per_cpu.h +++ b/hypervisor/include/arch/x86/per_cpu.h @@ -32,7 +32,7 @@ struct per_cpu_region { struct emul_ctxt g_inst_ctxt; struct host_gdt gdt; struct tss_64 tss; - enum cpu_state state; + enum cpu_state cpu_state; uint8_t mc_stack[CONFIG_STACK_SIZE] __aligned(16); uint8_t df_stack[CONFIG_STACK_SIZE] __aligned(16); uint8_t sf_stack[CONFIG_STACK_SIZE] __aligned(16); diff --git a/hypervisor/include/arch/x86/softirq.h b/hypervisor/include/arch/x86/softirq.h index 0c768c025..4f0280285 100644 --- a/hypervisor/include/arch/x86/softirq.h +++ b/hypervisor/include/arch/x86/softirq.h @@ -18,6 +18,6 @@ void enable_softirq(uint16_t cpu_id); void disable_softirq(uint16_t cpu_id); void init_softirq(void); -void raise_softirq(int softirq_id); +void raise_softirq(uint16_t softirq_id); void exec_softirq(void); #endif /* SOFTIRQ_H */ diff --git a/hypervisor/include/lib/spinlock.h b/hypervisor/include/lib/spinlock.h 
index 14978e92c..613e4b099 100644 --- a/hypervisor/include/lib/spinlock.h +++ b/hypervisor/include/lib/spinlock.h @@ -68,12 +68,12 @@ static inline void spinlock_release(spinlock_t *lock) #define spinlock_irqsave_obtain(l) \ do { \ CPU_INT_ALL_DISABLE(); \ - spinlock_obtain((l)); \ + spinlock_obtain(l); \ } while (0) #define spinlock_irqrestore_release(l) \ do { \ - spinlock_release((l)); \ + spinlock_release(l); \ CPU_INT_ALL_RESTORE(); \ } while (0)
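
To make the two recurring MISRA-C integral patterns in this patch concrete — U/UL
suffixes on unsigned constants and explicit casts when narrowing — here is a
minimal standalone sketch. It is illustrative only: the names (DEMO_PAGE_SHIFT,
demo_id_from_reg, and so on) are made up and are not taken from the ACRN tree.

#include <stdint.h>
#include <stdio.h>

/* Unsigned literals carry an explicit U/UL suffix so their type matches the
 * unsigned operands they are combined with, mirroring the constant-suffix
 * changes in this patch.
 */
#define DEMO_PAGE_SHIFT		12U
#define DEMO_PAGE_SIZE		(1UL << DEMO_PAGE_SHIFT)

/* Narrowing a wider register value into a smaller variable is done with an
 * explicit cast rather than an implicit conversion, as in get_cur_lapic_id().
 */
static uint8_t demo_id_from_reg(uint32_t id_reg)
{
	return (uint8_t)(id_reg >> 24U);	/* bits 31:24 hold the ID */
}

int main(void)
{
	uint64_t size = 5000UL;
	/* round up to whole pages using unsigned arithmetic throughout */
	uint64_t pages = (size + DEMO_PAGE_SIZE - 1UL) >> DEMO_PAGE_SHIFT;

	printf("pages=%llu id=%u\n", (unsigned long long)pages,
		(unsigned int)demo_id_from_reg(0x0A000000U));
	return 0;
}

The lapic.h change from enums to U-suffixed macros presumably follows the same
reasoning: enumeration constants have signed int type in C, whereas the ICR
bit-fields they are written into are unsigned.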