diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c
index 1f4b20a5f..d3118c8b0 100644
--- a/hypervisor/arch/x86/guest/vcpu.c
+++ b/hypervisor/arch/x86/guest/vcpu.c
@@ -83,6 +83,8 @@ int create_vcpu(int cpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
 	}
 #endif
 
+	vcpu->arch_vcpu.vpid = allocate_vpid();
+
 	/* Allocate VMCS region for this VCPU */
 	vcpu->arch_vcpu.vmcs = alloc_page();
 	ASSERT(vcpu->arch_vcpu.vmcs != NULL, "");
@@ -145,6 +147,15 @@ int start_vcpu(struct vcpu *vcpu)
 	pr_info("VM %d Starting VCPU %d",
 			vcpu->vm->attr.id, vcpu->vcpu_id);
 
+	if (vcpu->arch_vcpu.vpid)
+		exec_vmwrite(VMX_VPID, vcpu->arch_vcpu.vpid);
+
+	/*
+	 * A power-up or a reset invalidates all linear mappings,
+	 * guest-physical mappings, and combined mappings
+	 */
+	flush_vpid_global();
+
 	/* Set vcpu launched */
 	vcpu->launched = true;
 
diff --git a/hypervisor/arch/x86/interrupt.c b/hypervisor/arch/x86/interrupt.c
index ea6200259..a5e454788 100644
--- a/hypervisor/arch/x86/interrupt.c
+++ b/hypervisor/arch/x86/interrupt.c
@@ -375,6 +375,9 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
 	if (bitmap_test_and_clear(ACRN_REQUEST_EPT_FLUSH, pending_req_bits))
 		invept(vcpu);
 
+	if (bitmap_test_and_clear(ACRN_REQUEST_VPID_FLUSH, pending_req_bits))
+		flush_vpid_single(vcpu->arch_vcpu.vpid);
+
 	if (bitmap_test_and_clear(ACRN_REQUEST_TMR_UPDATE, pending_req_bits))
 		vioapic_update_tmr(vcpu);
 
diff --git a/hypervisor/arch/x86/mmu.c b/hypervisor/arch/x86/mmu.c
index e3b42911b..9af2d21cd 100644
--- a/hypervisor/arch/x86/mmu.c
+++ b/hypervisor/arch/x86/mmu.c
@@ -44,6 +44,14 @@ static struct vmx_capability {
 	uint32_t vpid;
 } vmx_caps;
 
+/*
+ * If the logical processor is in VMX non-root operation and
+ * the "enable VPID" VM-execution control is 1, the current VPID
+ * is the value of the VPID VM-execution control field in the VMCS.
+ * (VM entry ensures that this value is never 0000H).
+ */
+static int vmx_vpid_nr = VMX_MIN_NR_VPID;
+
 #define INVEPT_TYPE_SINGLE_CONTEXT	1UL
 #define INVEPT_TYPE_ALL_CONTEXTS	2UL
 #define VMFAIL_INVALID_EPT_VPID \
@@ -61,6 +69,25 @@ struct invept_desc {
 	uint64_t _res;
 };
 
+static inline void _invvpid(uint64_t type, int vpid, uint64_t gva)
+{
+	int error = 0;
+
+	struct {
+		uint64_t vpid : 16;
+		uint64_t rsvd : 48;
+		uint64_t gva;
+	} operand = { vpid, 0, gva };
+
+	asm volatile ("invvpid %1, %2\n"
+			VMFAIL_INVALID_EPT_VPID
+			: "=r" (error)
+			: "m" (operand), "r" (type)
+			: "memory");
+
+	ASSERT(error == 0, "invvpid error");
+}
+
 static inline void _invept(uint64_t type, struct invept_desc desc)
 {
 	int error = 0;
@@ -113,6 +140,38 @@ int check_vmx_mmu_cap(void)
 	return 0;
 }
 
+int allocate_vpid(void)
+{
+	int vpid = atomic_xadd(&vmx_vpid_nr, 1);
+
+	/* TODO: vpid overflow */
+	if (vpid >= VMX_MAX_NR_VPID) {
+		pr_err("%s, vpid overflow\n", __func__);
+		/*
+		 * set vmx_vpid_nr to VMX_MAX_NR_VPID to disable vpid
+		 * since the next atomic_xadd will always be larger than
+		 * VMX_MAX_NR_VPID.
+		 */
+		vmx_vpid_nr = VMX_MAX_NR_VPID;
+		vpid = 0;
+	}
+
+	return vpid;
+}
+
+void flush_vpid_single(int vpid)
+{
+	if (vpid == 0)
+		return;
+
+	_invvpid(VMX_VPID_TYPE_SINGLE_CONTEXT, vpid, 0);
+}
+
+void flush_vpid_global(void)
+{
+	_invvpid(VMX_VPID_TYPE_ALL_CONTEXT, 0, 0);
+}
+
 void invept(struct vcpu *vcpu)
 {
 	struct invept_desc desc = {0};
diff --git a/hypervisor/arch/x86/vmx.c b/hypervisor/arch/x86/vmx.c
index 19ed01ea9..e63adae77 100644
--- a/hypervisor/arch/x86/vmx.c
+++ b/hypervisor/arch/x86/vmx.c
@@ -1085,6 +1085,11 @@ static void init_exec_ctrl(struct vcpu *vcpu)
 	value32 &= ~(VMX_PROCBASED_CTLS_CR3_LOAD |
 			VMX_PROCBASED_CTLS_CR3_STORE);
 
+	/*
+	 * Disable VM_EXIT for invlpg execution.
+	 */
+	value32 &= ~VMX_PROCBASED_CTLS_INVLPG;
+
 	if (is_vapic_supported()) {
 		value32 |= VMX_PROCBASED_CTLS_TPR_SHADOW;
 	} else {
@@ -1106,6 +1111,11 @@ static void init_exec_ctrl(struct vcpu *vcpu)
 			VMX_PROCBASED_CTLS2_RDTSCP |
 			VMX_PROCBASED_CTLS2_UNRESTRICT);
 
+	if (vcpu->arch_vcpu.vpid)
+		value32 |= VMX_PROCBASED_CTLS2_VPID;
+	else
+		value32 &= ~VMX_PROCBASED_CTLS2_VPID;
+
 	if (is_vapic_supported()) {
 		value32 |= VMX_PROCBASED_CTLS2_VAPIC;
diff --git a/hypervisor/include/arch/x86/guest/guest.h b/hypervisor/include/arch/x86/guest/guest.h
index b749962fa..7f81259aa 100644
--- a/hypervisor/include/arch/x86/guest/guest.h
+++ b/hypervisor/include/arch/x86/guest/guest.h
@@ -47,6 +47,7 @@ int get_req_info(char *str, int str_max);
 #define ACRN_REQUEST_TMR_UPDATE		4
 #define ACRN_REQUEST_EPT_FLUSH		5
 #define ACRN_REQUEST_TRP_FAULT		6
+#define ACRN_REQUEST_VPID_FLUSH		7	/* flush vpid tlb */
 
 #define E820_MAX_ENTRIES	32
 
diff --git a/hypervisor/include/arch/x86/guest/vcpu.h b/hypervisor/include/arch/x86/guest/vcpu.h
index eaa881834..da175797e 100644
--- a/hypervisor/include/arch/x86/guest/vcpu.h
+++ b/hypervisor/include/arch/x86/guest/vcpu.h
@@ -172,6 +172,7 @@ struct vcpu_arch {
 	/* A pointer to the VMCS for this CPU. */
 	void *vmcs;
+	int vpid;
 
 	/* Holds the information needed for IRQ/exception handling.
 	 */
 	struct {
diff --git a/hypervisor/include/arch/x86/mmu.h b/hypervisor/include/arch/x86/mmu.h
index d90302020..326346a55 100644
--- a/hypervisor/include/arch/x86/mmu.h
+++ b/hypervisor/include/arch/x86/mmu.h
@@ -303,6 +303,9 @@ int unmap_mem(struct map_params *map_params, void *paddr, void *vaddr,
 int modify_mem(struct map_params *map_params, void *paddr, void *vaddr,
 		uint64_t size, uint32_t flags);
 int check_vmx_mmu_cap(void);
+int allocate_vpid(void);
+void flush_vpid_single(int vpid);
+void flush_vpid_global(void);
 void invept(struct vcpu *vcpu);
 bool check_continuous_hpa(struct vm *vm, uint64_t gpa, uint64_t size);
 int obtain_last_page_table_entry(struct map_params *map_params,
diff --git a/hypervisor/include/arch/x86/vmx.h b/hypervisor/include/arch/x86/vmx.h
index 7d3749257..71810e2c1 100644
--- a/hypervisor/include/arch/x86/vmx.h
+++ b/hypervisor/include/arch/x86/vmx.h
@@ -314,6 +314,14 @@
 #define VMX_EPT_INVEPT_SINGLE_CONTEXT	(1 << 25)
 #define VMX_EPT_INVEPT_GLOBAL_CONTEXT	(1 << 26)
 
+#define VMX_MIN_NR_VPID			1
+#define VMX_MAX_NR_VPID			(1 << 5)
+
+#define VMX_VPID_TYPE_INDIVIDUAL_ADDR	0
+#define VMX_VPID_TYPE_SINGLE_CONTEXT	1
+#define VMX_VPID_TYPE_ALL_CONTEXT	2
+#define VMX_VPID_TYPE_SINGLE_NON_GLOBAL	3
+
 #define VMX_VPID_INVVPID		(1 << 0)	/* (32 - 32) */
 #define VMX_VPID_INVVPID_INDIVIDUAL_ADDR	(1 << 8)	/* (40 - 32) */
 #define VMX_VPID_INVVPID_SINGLE_CONTEXT		(1 << 9)	/* (41 - 32) */