From 27d5711b628e7023c7241f7f959168a55c5c0ee1 Mon Sep 17 00:00:00 2001
From: Jie Deng
Date: Mon, 28 Dec 2020 14:17:21 +0800
Subject: [PATCH] hv: add a cache register for VMX_PROC_VM_EXEC_CONTROLS

This patch adds a cache register for VMX_PROC_VM_EXEC_CONTROLS
to avoid frequent VMCS accesses.

Tracked-On: #5605
Signed-off-by: Jie Deng
---
 hypervisor/arch/x86/guest/virq.c         | 27 ++++++++-------------------
 hypervisor/arch/x86/guest/vmcs.c         |  9 ++++-----
 hypervisor/arch/x86/guest/vmexit.c       |  7 ++-----
 hypervisor/include/arch/x86/guest/vcpu.h |  1 +
 4 files changed, 15 insertions(+), 29 deletions(-)

diff --git a/hypervisor/arch/x86/guest/virq.c b/hypervisor/arch/x86/guest/virq.c
index bc7a1ed63..f4d6515d0 100644
--- a/hypervisor/arch/x86/guest/virq.c
+++ b/hypervisor/arch/x86/guest/virq.c
@@ -311,17 +311,14 @@ void vcpu_inject_ss(struct acrn_vcpu *vcpu)
 
 int32_t interrupt_window_vmexit_handler(struct acrn_vcpu *vcpu)
 {
-	uint32_t value32;
-
 	TRACE_2L(TRACE_VMEXIT_INTERRUPT_WINDOW, 0UL, 0UL);
 
 	/* Disable interrupt-window exiting first.
 	 * acrn_handle_pending_request will continue handle for this vcpu
 	 */
 	vcpu->arch.irq_window_enabled = false;
-	value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);
-	value32 &= ~(VMX_PROCBASED_CTLS_IRQ_WIN);
-	exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, value32);
+	vcpu->arch.proc_vm_exec_ctrls &= ~(VMX_PROCBASED_CTLS_IRQ_WIN);
+	exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, vcpu->arch.proc_vm_exec_ctrls);
 
 	vcpu_retain_rip(vcpu);
 	return 0;
@@ -363,7 +360,6 @@ int32_t acrn_handle_pending_request(struct acrn_vcpu *vcpu)
 {
 	bool injected = false;
 	int32_t ret = 0;
-	uint32_t tmp;
 	struct acrn_vcpu_arch *arch = &vcpu->arch;
 	uint64_t *pending_req_bits = &arch->pending_req;
 
@@ -462,9 +458,8 @@ int32_t acrn_handle_pending_request(struct acrn_vcpu *vcpu)
 		if (bitmap_test(ACRN_REQUEST_EXTINT, pending_req_bits) ||
 			bitmap_test(ACRN_REQUEST_NMI, pending_req_bits) ||
 			vlapic_has_pending_delivery_intr(vcpu)) {
-			tmp = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);
-			tmp |= VMX_PROCBASED_CTLS_IRQ_WIN;
-			exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, tmp);
+			vcpu->arch.proc_vm_exec_ctrls |= VMX_PROCBASED_CTLS_IRQ_WIN;
+			exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, vcpu->arch.proc_vm_exec_ctrls);
 			arch->irq_window_enabled = true;
 		}
 	}
@@ -554,7 +549,6 @@ static int32_t emulate_splitlock(struct acrn_vcpu *vcpu, uint32_t exception_vect
 	uint8_t inst[1];
 	uint32_t err_code = 0U;
 	uint64_t fault_addr;
-	uint32_t value32;
 
 	/* Queue the exception by default if the exception cannot be handled. */
 	*queue_exception = true;
@@ -595,10 +589,8 @@ static int32_t emulate_splitlock(struct acrn_vcpu *vcpu, uint32_t exception_vect
 			vcpu->arch.inst_len = 1U;
 			if (vcpu->vm->hw.created_vcpus > 1U) {
 				/* Enable MTF to start single-stepping execution */
-				value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);
-				value32 |= VMX_PROCBASED_CTLS_MON_TRAP;
-				exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, value32);
-
+				vcpu->arch.proc_vm_exec_ctrls |= VMX_PROCBASED_CTLS_MON_TRAP;
+				exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, vcpu->arch.proc_vm_exec_ctrls);
 				vcpu->arch.emulating_lock = true;
 			}
 
@@ -716,15 +708,12 @@ int32_t exception_vmexit_handler(struct acrn_vcpu *vcpu)
 
 int32_t nmi_window_vmexit_handler(struct acrn_vcpu *vcpu)
 {
-	uint32_t value32;
-
 	/*
 	 * Disable NMI-window exiting here. We will process
 	 * the pending request in acrn_handle_pending_request later
 	 */
-	value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);
-	value32 &= ~VMX_PROCBASED_CTLS_NMI_WINEXIT;
-	exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, value32);
+	vcpu->arch.proc_vm_exec_ctrls &= ~VMX_PROCBASED_CTLS_NMI_WINEXIT;
+	exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, vcpu->arch.proc_vm_exec_ctrls);
 
 	vcpu_retain_rip(vcpu);
 
diff --git a/hypervisor/arch/x86/guest/vmcs.c b/hypervisor/arch/x86/guest/vmcs.c
index 65f64a538..2dec33340 100644
--- a/hypervisor/arch/x86/guest/vmcs.c
+++ b/hypervisor/arch/x86/guest/vmcs.c
@@ -288,7 +288,7 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
 	 * Enable VM_EXIT for rdpmc execution.
 	 */
 	value32 |= VMX_PROCBASED_CTLS_RDPMC;
-
+	vcpu->arch.proc_vm_exec_ctrls = value32;
 	exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, value32);
 
 	pr_dbg("VMX_PROC_VM_EXEC_CONTROLS: 0x%x ", value32);
@@ -595,10 +595,9 @@ void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu)
 		value32 &= ~VMX_EXIT_CTLS_ACK_IRQ;
 		exec_vmwrite32(VMX_EXIT_CONTROLS, value32);
 
-		value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);
-		value32 &= ~VMX_PROCBASED_CTLS_TPR_SHADOW;
-		value32 &= ~VMX_PROCBASED_CTLS_HLT;
-		exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, value32);
+		vcpu->arch.proc_vm_exec_ctrls &= ~VMX_PROCBASED_CTLS_TPR_SHADOW;
+		vcpu->arch.proc_vm_exec_ctrls &= ~VMX_PROCBASED_CTLS_HLT;
+		exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, vcpu->arch.proc_vm_exec_ctrls);
 
 		exec_vmwrite32(VMX_TPR_THRESHOLD, 0U);
 
diff --git a/hypervisor/arch/x86/guest/vmexit.c b/hypervisor/arch/x86/guest/vmexit.c
index e5aa012de..117523375 100644
--- a/hypervisor/arch/x86/guest/vmexit.c
+++ b/hypervisor/arch/x86/guest/vmexit.c
@@ -292,11 +292,8 @@ static void vcpu_complete_split_lock_emulation(struct acrn_vcpu *cur_vcpu)
 /* MTF is currently only used for split-lock emulation */
 static int32_t mtf_vmexit_handler(struct acrn_vcpu *vcpu)
 {
-	uint32_t value32;
-
-	value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);
-	value32 &= ~(VMX_PROCBASED_CTLS_MON_TRAP);
-	exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, value32);
+	vcpu->arch.proc_vm_exec_ctrls &= ~(VMX_PROCBASED_CTLS_MON_TRAP);
+	exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, vcpu->arch.proc_vm_exec_ctrls);
 
 	vcpu_retain_rip(vcpu);
 
diff --git a/hypervisor/include/arch/x86/guest/vcpu.h b/hypervisor/include/arch/x86/guest/vcpu.h
index 6bb17dd0a..5ff9336cd 100644
--- a/hypervisor/include/arch/x86/guest/vcpu.h
+++ b/hypervisor/include/arch/x86/guest/vcpu.h
@@ -244,6 +244,7 @@ struct acrn_vcpu_arch {
 	uint32_t exit_reason;
 	uint32_t idt_vectoring_info;
 	uint64_t exit_qualification;
+	uint32_t proc_vm_exec_ctrls;
 	uint32_t inst_len;
 
 	/* Information related to secondary / AP VCPU start-up */
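
Note on the pattern: every call site above now follows the same read-modify-write sequence — update the cached vcpu->arch.proc_vm_exec_ctrls field, then write it through to the VMCS with exec_vmwrite32() — so the exec_vmread32() that used to start each sequence is no longer needed. The patch open-codes this at each site; below is a minimal sketch of what a shared helper for the same pattern could look like. The helper name update_proc_vm_exec_ctrls() is hypothetical and not part of this patch or the ACRN tree; it only assumes the ACRN symbols already used above (struct acrn_vcpu, exec_vmwrite32, VMX_PROC_VM_EXEC_CONTROLS).

	/* Hypothetical helper, sketch only: the patch itself open-codes this
	 * read-modify-write at every call site.
	 */
	static inline void update_proc_vm_exec_ctrls(struct acrn_vcpu *vcpu,
			uint32_t set, uint32_t clear)
	{
		/* Modify the cached copy instead of reading the VMCS field first ... */
		vcpu->arch.proc_vm_exec_ctrls |= set;
		vcpu->arch.proc_vm_exec_ctrls &= ~clear;
		/* ... then write the new value through to the VMCS. */
		exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, vcpu->arch.proc_vm_exec_ctrls);
	}

With such a helper, the interrupt-window disable in interrupt_window_vmexit_handler() above would reduce to a single call: update_proc_vm_exec_ctrls(vcpu, 0U, VMX_PROCBASED_CTLS_IRQ_WIN);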