From 3b1deda0eb31d38d49c5475d55622b63e767fd27 Mon Sep 17 00:00:00 2001
From: Minggui Cao
Date: Fri, 17 Dec 2021 14:02:01 +0800
Subject: [PATCH] hv: revert NMI notification by INIT signal

NMI is used to notify a LAPIC-PT RTVM, to kick its CPU into the
hypervisor. But NMI may also be used by system devices, such as the
PMU (Performance Monitoring Unit). So use the INIT signal as the
notification mechanism for partitioned CPUs, to replace injecting NMI.

Also remove the now-unused NMI-notification code.

Tracked-On: #6966
Acked-by: Anthony Xu
Signed-off-by: Minggui Cao
---
 hypervisor/arch/x86/guest/vcpu.c         |  2 +-
 hypervisor/arch/x86/guest/virq.c         | 14 +--------
 hypervisor/arch/x86/guest/vmcs.c         | 14 ++-------
 hypervisor/arch/x86/guest/vmexit.c       | 38 ++++++++++++++----------
 hypervisor/arch/x86/lapic.c              | 11 +++++--
 hypervisor/arch/x86/nmi.c                | 35 +++++-----------------
 hypervisor/arch/x86/notify.c             | 22 --------------
 hypervisor/common/schedule.c             | 10 +++----
 hypervisor/include/arch/x86/asm/lapic.h  |  4 +--
 hypervisor/include/arch/x86/asm/notify.h |  1 -
 hypervisor/include/common/schedule.h     |  4 +--
 11 files changed, 53 insertions(+), 102 deletions(-)

diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c
index 13e22e87d..7e86219d9 100644
--- a/hypervisor/arch/x86/guest/vcpu.c
+++ b/hypervisor/arch/x86/guest/vcpu.c
@@ -794,7 +794,7 @@ void kick_vcpu(struct acrn_vcpu *vcpu)
 	    (per_cpu(vmcs_run, pcpu_id) == vcpu->arch.vmcs)) {
 		if (is_lapic_pt_enabled(vcpu)) {
 			/* For lapic-pt vCPUs */
-			send_single_nmi(pcpu_id);
+			send_single_init(pcpu_id);
 		} else {
 			send_single_ipi(pcpu_id, NOTIFY_VCPU_VECTOR);
 		}
diff --git a/hypervisor/arch/x86/guest/virq.c b/hypervisor/arch/x86/guest/virq.c
index 11e9afab0..e5ce6f87f 100644
--- a/hypervisor/arch/x86/guest/virq.c
+++ b/hypervisor/arch/x86/guest/virq.c
@@ -231,19 +231,7 @@ int32_t vcpu_queue_exception(struct acrn_vcpu *vcpu, uint32_t vector_arg, uint32
 			arch->exception_info.error = 0U;
 		}

-		if ((vector == IDT_NMI) && is_notification_nmi(vcpu->vm)) {
-			/*
-			 * Currently, ACRN doesn't support vNMI well and there is no well-designed
-			 * way to check if the NMI is for notification or not. Here we take all the
-			 * NMIs as notification NMI for lapic-pt VMs temporarily.
-			 *
-			 * TODO: Add a way in is_notification_nmi to check the NMI is for notification
-			 * or not in order to support vNMI.
-			 */
-			pr_dbg("This NMI is used as notification signal. So ignore it.");
-		} else {
-			vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
-		}
+		vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
 	}
 }

diff --git a/hypervisor/arch/x86/guest/vmcs.c b/hypervisor/arch/x86/guest/vmcs.c
index ac0e24d86..e6df1d9a9 100644
--- a/hypervisor/arch/x86/guest/vmcs.c
+++ b/hypervisor/arch/x86/guest/vmcs.c
@@ -595,7 +595,6 @@ void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu)
     * Disable posted interrupt processing
     * update x2apic msr bitmap for pass-thru
     * enable inteception only for ICR
-    * enable NMI exit as we will use NMI to kick vCPU thread
     * disable pre-emption for TSC DEADLINE MSR
     * Disable Register Virtualization and virtual interrupt delivery
     * Disable "use TPR shadow"
@@ -607,15 +606,6 @@ void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu)
        value32 &= ~VMX_PINBASED_CTLS_POST_IRQ;
    }

-   /*
-    * ACRN hypervisor needs to kick vCPU off VMX non-root mode to do some
-    * operations in hypervisor, such as interrupt/exception injection, EPT
-    * flush etc. For non lapic-pt vCPUs, we can use IPI to do so. But, it
-    * doesn't work for lapic-pt vCPUs as the IPI will be injected to VMs
-    * directly without vmexit. So, here we enable NMI-exiting and use NMI
-    * as notification signal after passthroughing the lapic to vCPU.
-    */
-   value32 |= VMX_PINBASED_CTLS_NMI_EXIT | VMX_PINBASED_CTLS_VIRT_NMI;
    exec_vmwrite32(VMX_PIN_VM_EXEC_CONTROLS, value32);

    value32 = exec_vmread32(VMX_EXIT_CONTROLS);
@@ -640,11 +630,11 @@ void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu)
        update_msr_bitmap_x2apic_passthru(vcpu);

        /*
-        * After passthroughing lapic to guest, we should use NMI signal to
+        * After passthroughing lapic to guest, we should use INIT signal to
         * notify vcpu thread instead of IPI. Because the IPI will be delivered
         * the guest directly without vmexit.
         */
-       vcpu->thread_obj.notify_mode = SCHED_NOTIFY_NMI;
+       vcpu->thread_obj.notify_mode = SCHED_NOTIFY_INIT;
    } else {
        value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS2);
        value32 &= ~VMX_PROCBASED_CTLS2_VAPIC;
diff --git a/hypervisor/arch/x86/guest/vmexit.c b/hypervisor/arch/x86/guest/vmexit.c
index a464e0899..4e89c3809 100644
--- a/hypervisor/arch/x86/guest/vmexit.c
+++ b/hypervisor/arch/x86/guest/vmexit.c
@@ -39,6 +39,7 @@ static int32_t pause_vmexit_handler(__unused struct acrn_vcpu *vcpu);
 static int32_t hlt_vmexit_handler(struct acrn_vcpu *vcpu);
 static int32_t mtf_vmexit_handler(struct acrn_vcpu *vcpu);
 static int32_t loadiwkey_vmexit_handler(struct acrn_vcpu *vcpu);
+static int32_t init_signal_vmexit_handler(__unused struct acrn_vcpu *vcpu);

 /* VM Dispatch table for Exit condition handling */
 static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
@@ -49,7 +50,7 @@ static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
    [VMX_EXIT_REASON_TRIPLE_FAULT] = {
        .handler = triple_fault_vmexit_handler},
    [VMX_EXIT_REASON_INIT_SIGNAL] = {
-       .handler = undefined_vmexit_handler},
+       .handler = init_signal_vmexit_handler},
    [VMX_EXIT_REASON_STARTUP_IPI] = {
        .handler = unhandled_vmexit_handler},
    [VMX_EXIT_REASON_IO_SMI] = {
@@ -236,20 +237,8 @@ int32_t vmexit_handler(struct acrn_vcpu *vcpu)
            (void)vcpu_queue_exception(vcpu, vector, err_code);
            vcpu->arch.idt_vectoring_info = 0U;
        } else if (type == VMX_INT_TYPE_NMI) {
-           if (is_notification_nmi(vcpu->vm)) {
-               /*
-                * Currently, ACRN doesn't support vNMI well and there is no well-designed
-                * way to check if the NMI is for notification or not. Here we take all the
-                * NMIs as notification NMI for lapic-pt VMs temporarily.
-                *
-                * TODO: Add a way in is_notification_nmi to check the NMI is for notification
-                * or not in order to support vNMI.
-                */
-               pr_dbg("This NMI is used as notification signal. So ignore it.");
-           } else {
-               vcpu_make_request(vcpu, ACRN_REQUEST_NMI);
-               vcpu->arch.idt_vectoring_info = 0U;
-           }
+           vcpu_make_request(vcpu, ACRN_REQUEST_NMI);
+           vcpu->arch.idt_vectoring_info = 0U;
        } else {
            /* No action on EXT_INT or SW exception. */
        }
@@ -492,6 +481,25 @@ static int32_t loadiwkey_vmexit_handler(struct acrn_vcpu *vcpu)
    return 0;
 }

+/*
+ * This handler is only triggered by an INIT signal when powering off from inside the RTVM
+ */
+static int32_t init_signal_vmexit_handler(__unused struct acrn_vcpu *vcpu)
+{
+   /*
+    * Intel SDM Volume 3, 25.2:
+    * INIT signals. INIT signals cause VM exits. A logical processor performs none
+    * of the operations normally associated with these events. Such exits do not modify
+    * register state or clear pending events as they would outside of VMX operation (If
+    * a logical processor is in the wait-for-SIPI state, INIT signals are blocked. They do
+    * not cause VM exits in this case).
+    *
+    * So, it is safe to ignore the signal, but its RIP needs to be retained.
+    */
+   vcpu_retain_rip(vcpu);
+   return 0;
+}
+
 /*
  * vmexit handler for just injecting a #UD exception
  * ACRN doesn't enable VMFUNC, VMFUNC treated as undefined.
diff --git a/hypervisor/arch/x86/lapic.c b/hypervisor/arch/x86/lapic.c
index 237b1fde4..34611739e 100644
--- a/hypervisor/arch/x86/lapic.c
+++ b/hypervisor/arch/x86/lapic.c
@@ -278,12 +278,19 @@ void send_single_ipi(uint16_t pcpu_id, uint32_t vector)
  *
  * @return None
  */
-void send_single_nmi(uint16_t pcpu_id)
+void send_single_init(uint16_t pcpu_id)
 {
    union apic_icr icr;

+   /*
+    * Intel SDM Vol3 23.8:
+    * The INIT signal is blocked whenever a logical processor is in VMX root operation.
+    * It is not blocked in VMX non-root operation. Instead, INITs cause VM exits.
+    */
+
    icr.value_32.hi_32 = per_cpu(lapic_id, pcpu_id);
-   icr.value_32.lo_32 = (INTR_LAPIC_ICR_PHYSICAL << 11U) | (INTR_LAPIC_ICR_NMI << 8U);
+   icr.value_32.lo_32 = (INTR_LAPIC_ICR_PHYSICAL << 11U) | (INTR_LAPIC_ICR_INIT << 8U);
    msr_write(MSR_IA32_EXT_APIC_ICR, icr.value);
+
 }

diff --git a/hypervisor/arch/x86/nmi.c b/hypervisor/arch/x86/nmi.c
index 0b0232c82..ba8cc2feb 100644
--- a/hypervisor/arch/x86/nmi.c
+++ b/hypervisor/arch/x86/nmi.c
@@ -7,36 +7,17 @@
 #include 
 #include 

+#include 
+#include 
+
 void handle_nmi(__unused struct intr_excp_ctx *ctx)
 {
-   uint32_t value32;
+   uint16_t pcpu_id = get_pcpu_id();
+   struct acrn_vcpu *vcpu = get_running_vcpu(pcpu_id);

    /*
-    * There is a window where we may miss the current request in this
-    * notification period when the work flow is as the following:
-    *
-    *       CPUx +                  + CPUr
-    *            |                  |
-    *            |                  +--+
-    *            |                  |  | Handle pending req
-    *            |                  <--+
-    *            +--+               |
-    *            |  | Set req flag  |
-    *            <--+               |
-    *            +------------------>---+
-    *            |     Send NMI     |   | Handle NMI
-    *            |                  <--+
-    *            |                  |
-    *            |                  |
-    *            |                  +--> vCPU enter
-    *            |                  |
-    *            +                  +
-    *
-    * So, here we enable the NMI-window exiting to trigger the next vmexit
-    * once there is no "virtual-NMI blocking" after vCPU enter into VMX non-root
-    * mode. Then we can process the pending request on time.
+    * If an NMI occurs, inject it into the current vCPU. For now, only PMI is verified;
+    * other kinds of NMI may need to be checked further.
     */
-   value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);
-   value32 |= VMX_PROCBASED_CTLS_NMI_WINEXIT;
-   exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, value32);
+   vcpu_make_request(vcpu, ACRN_REQUEST_NMI);
 }
diff --git a/hypervisor/arch/x86/notify.c b/hypervisor/arch/x86/notify.c
index b8efb5bf6..2f4583f2d 100644
--- a/hypervisor/arch/x86/notify.c
+++ b/hypervisor/arch/x86/notify.c
@@ -122,25 +122,3 @@ void setup_pi_notification(void)
        }
    }
 }
-
-/**
- * @brief Check if the NMI is for notification purpose
- *
- * @return true, if the NMI is triggered for notifying vCPU
- * @return false, if the NMI is triggered for other purpose
- */
-bool is_notification_nmi(const struct acrn_vm *vm)
-{
-   bool ret;
-
-   /*
-    * Currently, ACRN doesn't support vNMI well and there is no well-designed
-    * way to check if the NMI is for notification or not. Here we take all the
-    * NMIs as notification NMI for lapic-pt VMs temporarily.
-    *
-    * TODO: Add a way to check the NMI is for notification or not in order to support vNMI.
-    */
-   ret = is_lapic_pt_configured(vm);
-
-   return ret;
-}
diff --git a/hypervisor/common/schedule.c b/hypervisor/common/schedule.c
index 1aabc9567..69dddb1db 100644
--- a/hypervisor/common/schedule.c
+++ b/hypervisor/common/schedule.c
@@ -125,7 +125,7 @@ struct thread_object *sched_get_current(uint16_t pcpu_id)
 }

 /**
- * @pre delmode == DEL_MODE_IPI || delmode == DEL_MODE_NMI
+ * @pre delmode == DEL_MODE_IPI || delmode == DEL_MODE_INIT
  */
 void make_reschedule_request(uint16_t pcpu_id, uint16_t delmode)
 {
@@ -137,8 +137,8 @@ void make_reschedule_request(uint16_t pcpu_id, uint16_t delmode)
    case DEL_MODE_IPI:
        send_single_ipi(pcpu_id, NOTIFY_VCPU_VECTOR);
        break;
-   case DEL_MODE_NMI:
-       send_single_nmi(pcpu_id);
+   case DEL_MODE_INIT:
+       send_single_init(pcpu_id);
        break;
    default:
        ASSERT(false, "Unknown delivery mode %u for pCPU%u", delmode, pcpu_id);
@@ -202,8 +202,8 @@ void sleep_thread(struct thread_object *obj)
        scheduler->sleep(obj);
    }
    if (is_running(obj)) {
-       if (obj->notify_mode == SCHED_NOTIFY_NMI) {
-           make_reschedule_request(pcpu_id, DEL_MODE_NMI);
+       if (obj->notify_mode == SCHED_NOTIFY_INIT) {
+           make_reschedule_request(pcpu_id, DEL_MODE_INIT);
        } else {
            make_reschedule_request(pcpu_id, DEL_MODE_IPI);
        }
diff --git a/hypervisor/include/arch/x86/asm/lapic.h b/hypervisor/include/arch/x86/asm/lapic.h
index 1e78daf97..a5f4b9a76 100644
--- a/hypervisor/include/arch/x86/asm/lapic.h
+++ b/hypervisor/include/arch/x86/asm/lapic.h
@@ -118,12 +118,12 @@ void send_single_ipi(uint16_t pcpu_id, uint32_t vector);
 /* End of ipi_ext_apis */

 /**
- * @brief Send an NMI signal to a single pCPU
+ * @brief Send an INIT signal to a single pCPU
  *
  * @param[in] pcpu_id The id of destination physical cpu
  *
  * @return None
  */
-void send_single_nmi(uint16_t pcpu_id);
+void send_single_init(uint16_t pcpu_id);

 #endif /* ARCH_X86_LAPIC_H */
diff --git a/hypervisor/include/arch/x86/asm/notify.h b/hypervisor/include/arch/x86/asm/notify.h
index 0aede50e1..495181d42 100644
--- a/hypervisor/include/arch/x86/asm/notify.h
+++ b/hypervisor/include/arch/x86/asm/notify.h
@@ -15,7 +15,6 @@ struct smp_call_info_data {
 struct acrn_vm;
 void smp_call_function(uint64_t mask, smp_call_func_t func, void *data);

-bool is_notification_nmi(const struct acrn_vm *vm);
 void setup_notification(void);
 void setup_pi_notification(void);

diff --git a/hypervisor/include/common/schedule.h b/hypervisor/include/common/schedule.h
index bcebf2fa1..b88d0950e 100644
--- a/hypervisor/include/common/schedule.h
+++ b/hypervisor/include/common/schedule.h
@@ -12,7 +12,7 @@

 #define NEED_RESCHEDULE (1U)

-#define DEL_MODE_NMI (1U)
+#define DEL_MODE_INIT (1U)
 #define DEL_MODE_IPI (2U)

 #define THREAD_DATA_SIZE (256U)
@@ -24,7 +24,7 @@ enum thread_object_state {
 };

 enum sched_notify_mode {
-   SCHED_NOTIFY_NMI,
+   SCHED_NOTIFY_INIT,
    SCHED_NOTIFY_IPI
 };
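
The functional core of the patch is the one-line change in send_single_init(): the x2APIC ICR delivery mode switches from NMI to INIT, and everything else is renaming and removal of the NMI-notification plumbing. Below is a minimal, self-contained sketch of that ICR write. The constant names (MSR_X2APIC_ICR, ICR_DELIVERY_INIT, ICR_DELIVERY_NMI, ICR_DEST_PHYSICAL) and the wrmsr_stub() helper are illustrative stand-ins, not ACRN's INTR_LAPIC_ICR_* macros or msr_write(); the bit positions follow the Intel SDM x2APIC ICR layout (delivery mode in bits 10:8, destination mode in bit 11, destination APIC ID in bits 63:32).

#include <stdint.h>

/* Illustrative x2APIC constants (Intel SDM ICR layout); not ACRN identifiers. */
#define MSR_X2APIC_ICR      0x830U       /* IA32_X2APIC_ICR */
#define ICR_DELIVERY_NMI    (4ULL << 8)  /* delivery mode 100b: NMI  */
#define ICR_DELIVERY_INIT   (5ULL << 8)  /* delivery mode 101b: INIT */
#define ICR_DEST_PHYSICAL   (0ULL << 11) /* destination mode: physical */

/* Placeholder for the platform WRMSR; a real hypervisor supplies its own. */
static void wrmsr_stub(uint32_t msr, uint64_t value)
{
    (void)msr;
    (void)value;
}

/*
 * Kick the pCPU running a lapic-pt vCPU by sending it an INIT IPI.
 * In VMX non-root operation the INIT forces a VM exit (handled by
 * init_signal_vmexit_handler above); in VMX root operation it is
 * blocked, so the hypervisor itself is not disturbed by it.
 */
static void kick_lapic_pt_pcpu(uint32_t dest_lapic_id)
{
    uint64_t icr = ((uint64_t)dest_lapic_id << 32)  /* bits 63:32: destination APIC ID */
            | ICR_DEST_PHYSICAL
            | ICR_DELIVERY_INIT;                    /* previously ICR_DELIVERY_NMI */

    wrmsr_stub(MSR_X2APIC_ICR, icr);
}

Using INIT instead of NMI keeps the kick mechanism working for lapic-pt vCPUs while leaving the NMI vector free for the PMU and other system devices, which is the motivation stated in the commit message.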