diff --git a/hypervisor/arch/x86/guest/virq.c b/hypervisor/arch/x86/guest/virq.c
index f6ed5a9f8..1099d8da5 100644
--- a/hypervisor/arch/x86/guest/virq.c
+++ b/hypervisor/arch/x86/guest/virq.c
@@ -531,3 +531,20 @@ int32_t exception_vmexit_handler(struct acrn_vcpu *vcpu)
 
 	return status;
 }
+
+int32_t nmi_window_vmexit_handler(struct acrn_vcpu *vcpu)
+{
+	uint32_t value32;
+
+	/*
+	 * Disable NMI-window exiting here. We will process
+	 * the pending request in acrn_handle_pending_request later.
+	 */
+	value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);
+	value32 &= ~VMX_PROCBASED_CTLS_NMI_WINEXIT;
+	exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, value32);
+
+	vcpu_retain_rip(vcpu);
+
+	return 0;
+}
diff --git a/hypervisor/arch/x86/guest/vmcs.c b/hypervisor/arch/x86/guest/vmcs.c
index 1efa37db6..7dd2d806f 100644
--- a/hypervisor/arch/x86/guest/vmcs.c
+++ b/hypervisor/arch/x86/guest/vmcs.c
@@ -582,7 +582,7 @@ void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu)
 	 * directly without vmexit. So, here we enable NMI-exiting and use NMI
 	 * as notification signal after passthroughing the lapic to vCPU.
 	 */
-	value32 |= VMX_PINBASED_CTLS_NMI_EXIT;
+	value32 |= VMX_PINBASED_CTLS_NMI_EXIT | VMX_PINBASED_CTLS_VIRT_NMI;
 	exec_vmwrite32(VMX_PIN_VM_EXEC_CONTROLS, value32);
 
 	value32 = exec_vmread32(VMX_EXIT_CONTROLS);
diff --git a/hypervisor/arch/x86/guest/vmexit.c b/hypervisor/arch/x86/guest/vmexit.c
index 8042e1191..506a66cd6 100644
--- a/hypervisor/arch/x86/guest/vmexit.c
+++ b/hypervisor/arch/x86/guest/vmexit.c
@@ -51,7 +51,7 @@ static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
 	[VMX_EXIT_REASON_INTERRUPT_WINDOW] = {
 		.handler = interrupt_window_vmexit_handler},
 	[VMX_EXIT_REASON_NMI_WINDOW] = {
-		.handler = unhandled_vmexit_handler},
+		.handler = nmi_window_vmexit_handler},
 	[VMX_EXIT_REASON_TASK_SWITCH] = {
 		.handler = unhandled_vmexit_handler},
 	[VMX_EXIT_REASON_CPUID] = {
diff --git a/hypervisor/arch/x86/irq.c b/hypervisor/arch/x86/irq.c
index 158165d67..b0ba03514 100644
--- a/hypervisor/arch/x86/irq.c
+++ b/hypervisor/arch/x86/irq.c
@@ -18,6 +18,7 @@
 #include 
 #include 
 #include 
+#include 
 
 static spinlock_t exception_spinlock = { .head = 0U, .tail = 0U, };
 static spinlock_t irq_alloc_spinlock = { .head = 0U, .tail = 0U, };
@@ -384,10 +385,36 @@ void dispatch_exception(struct intr_excp_ctx *ctx)
 
 void handle_nmi(__unused struct intr_excp_ctx *ctx)
 {
+	uint32_t value32;
+
 	/*
-	 * Just ignore the NMI here for now.
-	 * TODO: implement specific NMI handling function.
+	 * There is a window where we may miss the current request in this
+	 * notification period when the workflow is as follows:
+	 *
+	 *       CPUx +                   + CPUr
+	 *            |                   |
+	 *            |                   +--+
+	 *            |                   |  | Handle pending req
+	 *            |                   <--+
+	 *         +--+                   |
+	 *         |  | Set req flag      |
+	 *         <--+                   |
+	 *         +------------------>---+
+	 *         |     Send NMI         |  | Handle NMI
+	 *         |                      <--+
+	 *         |                      |
+	 *         |                      |
+	 *         |                      +--> vCPU enter
+	 *         |                      |
+	 *         +                      +
+	 *
+	 * So, here we enable NMI-window exiting to trigger the next vmexit
+	 * once there is no "virtual-NMI blocking" after the vCPU enters VMX
+	 * non-root mode. Then we can process the pending request in time.
 	 */
+	value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);
+	value32 |= VMX_PROCBASED_CTLS_NMI_WINEXIT;
+	exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, value32);
 }
 
 static void init_irq_descs(void)
diff --git a/hypervisor/include/arch/x86/irq.h b/hypervisor/include/arch/x86/irq.h
index 4eac7dce7..2c96e11ee 100644
--- a/hypervisor/include/arch/x86/irq.h
+++ b/hypervisor/include/arch/x86/irq.h
@@ -209,6 +209,7 @@ void vcpu_make_request(struct acrn_vcpu *vcpu, uint16_t eventid);
  * @pre vcpu != NULL
  */
 int32_t exception_vmexit_handler(struct acrn_vcpu *vcpu);
+int32_t nmi_window_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t interrupt_window_vmexit_handler(struct acrn_vcpu *vcpu);
 int32_t external_interrupt_vmexit_handler(struct acrn_vcpu *vcpu);
 int32_t acrn_handle_pending_request(struct acrn_vcpu *vcpu);
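
Note on the flow above: the patch splits the notification protocol across two places. handle_nmi() arms NMI-window exiting when the notification NMI arrives, and nmi_window_vmexit_handler() disarms it on the resulting vmexit so acrn_handle_pending_request() can pick up a request that was posted after the previous one was drained. The stand-alone sketch below models that arm/disarm handshake only; it is illustrative, not ACRN code, and the control variable, bit value, and helper names are placeholders for VMX_PROC_VM_EXEC_CONTROLS, VMX_PROCBASED_CTLS_NMI_WINEXIT and the exec_vmread32()/exec_vmwrite32() accessors.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_NMI_WINEXIT  (1U << 22)   /* placeholder for VMX_PROCBASED_CTLS_NMI_WINEXIT */

static uint32_t model_proc_ctls;        /* stands in for the proc-based VM-execution controls */
static bool pending_req;                /* stands in for the per-vCPU pending-request bitmap  */

/* Notifying pCPU: set the request flag, then "send" the notification NMI. */
static void model_notify(void)
{
	pending_req = true;
}

/* NMI handler on the target pCPU: arm NMI-window exiting (mirrors handle_nmi()). */
static void model_handle_nmi(void)
{
	model_proc_ctls |= MODEL_NMI_WINEXIT;
}

/* NMI-window vmexit: disarm the control and drain the pending request
 * (mirrors nmi_window_vmexit_handler() plus the later request handling). */
static void model_nmi_window_exit(void)
{
	model_proc_ctls &= ~MODEL_NMI_WINEXIT;
	if (pending_req) {
		pending_req = false;
		puts("pending request processed on NMI-window exit");
	}
}

int main(void)
{
	/* Race from the diagram: the request is posted after the target pCPU
	 * already drained its requests for this notification period. */
	model_notify();        /* CPUx sets the flag and sends the NMI        */
	model_handle_nmi();    /* CPUr takes the NMI, arms NMI-window exiting */

	/* Once the vCPU resumes and there is no virtual-NMI blocking, the
	 * NMI-window vmexit fires and the request is picked up. */
	if ((model_proc_ctls & MODEL_NMI_WINEXIT) != 0U) {
		model_nmi_window_exit();
	}
	return 0;
}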