diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c
index fbac84a3f..090915459 100644
--- a/hypervisor/arch/x86/guest/vcpu.c
+++ b/hypervisor/arch/x86/guest/vcpu.c
@@ -166,6 +166,49 @@ void vcpu_set_guest_msr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t val)
 	}
 }
+/*
+ * Write the eoi_exit_bitmaps to VMCS fields
+ */
+void vcpu_set_vmcs_eoi_exit(struct acrn_vcpu *vcpu)
+{
+	pr_dbg("%s", __func__);
+
+	spinlock_obtain(&(vcpu->arch.lock));
+	if (is_apicv_intr_delivery_supported()) {
+		exec_vmwrite64(VMX_EOI_EXIT0_FULL, vcpu->arch.eoi_exit_bitmap[0]);
+		exec_vmwrite64(VMX_EOI_EXIT1_FULL, vcpu->arch.eoi_exit_bitmap[1]);
+		exec_vmwrite64(VMX_EOI_EXIT2_FULL, vcpu->arch.eoi_exit_bitmap[2]);
+		exec_vmwrite64(VMX_EOI_EXIT3_FULL, vcpu->arch.eoi_exit_bitmap[3]);
+	}
+	spinlock_release(&(vcpu->arch.lock));
+}
+
+/*
+ * Set the eoi_exit_bitmap bit for specific vector
+ * called with vcpu->arch.lock held
+ * @pre vcpu != NULL && vector <= 255U
+ */
+void vcpu_set_eoi_exit(struct acrn_vcpu *vcpu, uint32_t vector)
+{
+	pr_dbg("%s", __func__);
+
+	if (bitmap_test_and_set_nolock((uint16_t)(vector & 0x3fU),
+			&(vcpu->arch.eoi_exit_bitmap[vector >> 6U]))) {
+		pr_warn("Duplicated vector %u vcpu%u", vector, vcpu->vcpu_id);
+	}
+}
+
+/*
+ * Reset all eoi_exit_bitmaps
+ * called with vcpu->arch.lock held
+ */
+void vcpu_reset_eoi_exit_all(struct acrn_vcpu *vcpu)
+{
+	pr_dbg("%s", __func__);
+
+	memset((void *)(vcpu->arch.eoi_exit_bitmap), 0U, sizeof(vcpu->arch.eoi_exit_bitmap));
+}
+
 struct acrn_vcpu *get_ever_run_vcpu(uint16_t pcpu_id)
 {
 	return per_cpu(ever_run_vcpu, pcpu_id);
 }
@@ -379,6 +422,8 @@ int32_t create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn
 	init_vmtrr(vcpu);
 #endif
 
+	spinlock_init(&(vcpu->arch.lock));
+
 	/* Populate the return handle */
 	*rtn_vcpu_handle = vcpu;
 
diff --git a/hypervisor/arch/x86/virq.c b/hypervisor/arch/x86/virq.c
index ace5f52ca..be9337720 100644
--- a/hypervisor/arch/x86/virq.c
+++ b/hypervisor/arch/x86/virq.c
@@ -409,6 +409,10 @@ int32_t acrn_handle_pending_request(struct acrn_vcpu *vcpu)
 		vioapic_update_tmr(vcpu);
 	}
 
+	if (bitmap_test_and_clear_lock(ACRN_REQUEST_EOI_EXIT_UPDATE, pending_req_bits)) {
+		vcpu_set_vmcs_eoi_exit(vcpu);
+	}
+
 	/* handling cancelled event injection when vcpu is switched out */
 	if (arch->inject_event_pending) {
 		if ((arch->inject_info.intr_info & (EXCEPTION_ERROR_CODE_VALID << 8U)) != 0U) {
diff --git a/hypervisor/dm/vioapic.c b/hypervisor/dm/vioapic.c
index 89dd6b691..cb853cc4c 100644
--- a/hypervisor/dm/vioapic.c
+++ b/hypervisor/dm/vioapic.c
@@ -33,7 +33,7 @@
 #include <hypervisor.h>
 
 #define	RTBL_RO_BITS	(uint32_t)(IOAPIC_RTE_REM_IRR | IOAPIC_RTE_DELIVS)
-#define NEED_TMR_UPDATE (IOAPIC_RTE_TRGRMOD | IOAPIC_RTE_DELMOD | IOAPIC_RTE_INTVEC)
+#define NEED_EOI_EXIT_UPDATE (IOAPIC_RTE_TRGRMOD | IOAPIC_RTE_DELMOD | IOAPIC_RTE_DEST_MASK | IOAPIC_RTE_INTVEC)
 
 #define	ACRN_DBG_IOAPIC	6U
 #define ACRN_IOAPIC_VERSION	0x11U
@@ -171,6 +171,66 @@ vioapic_set_irqline_lock(const struct acrn_vm *vm, uint32_t irqline, uint32_t op
 	spinlock_release(&(vioapic->mtx));
 }
 
+/*
+ * Generate eoi_exit_bitmap and request each VCPU to update VMCS fields
+ * To be called with vioapic->mtx
+ * @pre vioapic != NULL
+ */
+static void
+vioapic_update_eoi_exit(const struct acrn_vioapic *vioapic)
+{
+	struct acrn_vcpu *vcpu;
+	union ioapic_rte rte;
+	uint64_t mask;
+	uint32_t vector, delmode, dest;
+	uint32_t pin, pincount;
+	uint16_t vcpu_id;
+	bool level, phys;
+
+	dev_dbg(ACRN_DBG_IOAPIC, "%s", __func__);
+
+	/* clear old bitmap to generate new bitmap */
+	foreach_vcpu(vcpu_id, vioapic->vm, vcpu) {
+		spinlock_obtain(&(vcpu->arch.lock));
+		vcpu_reset_eoi_exit_all(vcpu);
+	}
+
+	/* go through RTEs and set corresponding bits of eoi_exit_bitmap */
+	pincount = vioapic_pincount(vioapic->vm);
+	for (pin = 0U; pin < pincount; pin++) {
+		rte = vioapic->rtbl[pin];
+
+		level = ((rte.full & IOAPIC_RTE_TRGRLVL) != 0UL);
+		vector = rte.u.lo_32 & IOAPIC_RTE_LOW_INTVEC;
+
+		if (level && ((vector >= 0x20U) && (vector < NR_MAX_VECTOR))) {
+			/* if level-trigger and vector is valid */
+			delmode = (uint32_t)(rte.full & IOAPIC_RTE_DELMOD);
+
+			if ((delmode != APIC_DELMODE_FIXED) && (delmode != APIC_DELMODE_LOWPRIO)) {
+				dev_dbg(ACRN_DBG_IOAPIC,
+					"Ignoring level trigger-mode for delivery-mode 0x%x", delmode);
+			} else {
+				dest = (uint32_t)((rte.full) >> IOAPIC_RTE_DEST_SHIFT);
+				phys = ((rte.full & IOAPIC_RTE_DESTLOG) == 0UL);
+				calcvdest(vioapic->vm, &mask, dest, phys);
+
+				for (vcpu_id = ffs64(mask); vcpu_id != INVALID_BIT_INDEX; vcpu_id = ffs64(mask)) {
+					vcpu = vcpu_from_vid(vioapic->vm, vcpu_id);
+					vcpu_set_eoi_exit(vcpu, vector);
+					bitmap_clear_nolock(vcpu_id, &mask);
+				}
+			}
+		}
+	}
+
+	/* make request if eoi_exit_bitmap changed */
+	foreach_vcpu(vcpu_id, vioapic->vm, vcpu) {
+		spinlock_release(&(vcpu->arch.lock));
+		vcpu_make_request(vcpu, ACRN_REQUEST_EOI_EXIT_UPDATE);
+	}
+}
+
 /*
  * Reset the vlapic's trigger-mode register to reflect the ioapic pin
  * configuration.
@@ -358,15 +418,9 @@ static void vioapic_indirect_write(struct acrn_vioapic *vioapic, uint32_t addr,
 	 * rendezvous all the vcpus to update their vlapic
 	 * trigger-mode registers.
 	 */
-	if ((changed & NEED_TMR_UPDATE) != 0UL) {
-		uint16_t i;
-		struct acrn_vcpu *vcpu;
-
-		dev_dbg(ACRN_DBG_IOAPIC, "ioapic pin%hhu: recalculate vlapic trigger-mode reg", pin);
-
-		foreach_vcpu(i, vioapic->vm, vcpu) {
-			vcpu_make_request(vcpu, ACRN_REQUEST_TMR_UPDATE);
-		}
+	if ((changed & NEED_EOI_EXIT_UPDATE) != 0UL) {
+		dev_dbg(ACRN_DBG_IOAPIC, "ioapic pin%hhu: recalculate vlapic trigger-mode reg", pin);
+		vioapic_update_eoi_exit(vioapic);
 	}
 
 	/*
diff --git a/hypervisor/include/arch/x86/guest/guest.h b/hypervisor/include/arch/x86/guest/guest.h
index 3d2b24c6a..051347e2c 100644
--- a/hypervisor/include/arch/x86/guest/guest.h
+++ b/hypervisor/include/arch/x86/guest/guest.h
@@ -39,6 +39,7 @@
 #define ACRN_REQUEST_EPT_FLUSH 5U
 #define ACRN_REQUEST_TRP_FAULT 6U
 #define ACRN_REQUEST_VPID_FLUSH 7U /* flush vpid tlb */
+#define ACRN_REQUEST_EOI_EXIT_UPDATE 8U
 
 #define E820_MAX_ENTRIES 32U
 
diff --git a/hypervisor/include/arch/x86/guest/vcpu.h b/hypervisor/include/arch/x86/guest/vcpu.h
index 2eb3464d5..da38ad25b 100644
--- a/hypervisor/include/arch/x86/guest/vcpu.h
+++ b/hypervisor/include/arch/x86/guest/vcpu.h
@@ -169,6 +169,8 @@ struct ext_context {
 #define NUM_COMMON_MSRS 7U
 #define NUM_GUEST_MSRS (NUM_WORLD_MSRS + NUM_COMMON_MSRS)
 
+#define EOI_EXIT_BITMAP_SIZE 256U
+
 struct event_injection_info {
 	uint32_t intr_info;
 	uint32_t error_code;
@@ -247,6 +249,10 @@ struct acrn_vcpu_arch {
 
 	/* List of MSRS to be stored and loaded on VM exits or VM entries */
 	struct msr_store_area msr_area;
+
+	/* EOI_EXIT_BITMAP buffer, for the bitmap update */
+	uint64_t eoi_exit_bitmap[EOI_EXIT_BITMAP_SIZE >> 6U];
+	spinlock_t lock;
 } __aligned(PAGE_SIZE);
 
 struct acrn_vm;
@@ -498,6 +504,36 @@ uint64_t vcpu_get_guest_msr(const struct acrn_vcpu *vcpu, uint32_t msr);
  */
 void vcpu_set_guest_msr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t val);
 
+/**
+ * @brief write eoi_exit_bitmap to VMCS fields
+ *
+ * @param[in] vcpu pointer to vcpu data structure
+ *
+ * @return void
+ */
+void vcpu_set_vmcs_eoi_exit(struct acrn_vcpu *vcpu);
+
+/**
+ * @brief reset eoi_exit_bitmap
+ *
+ * @param[in] vcpu pointer to vcpu data structure
+ *
+ * @return void
+ */
+
+void vcpu_reset_eoi_exit_all(struct acrn_vcpu *vcpu);
+
+/**
+ * @brief set eoi_exit_bitmap bit
+ *
+ * Set corresponding bit of vector in eoi_exit_bitmap
+ *
+ * @param[in] vcpu pointer to vcpu data structure
+ * @param[in] vector
+ *
+ * @return void
+ */
+void vcpu_set_eoi_exit(struct acrn_vcpu *vcpu, uint32_t vector);
 /**
  * @brief set all the vcpu registers
  *