diff --git a/hypervisor/arch/x86/guest/vmx_io.c b/hypervisor/arch/x86/guest/vmx_io.c
index 0b79fed44..62acb3661 100644
--- a/hypervisor/arch/x86/guest/vmx_io.c
+++ b/hypervisor/arch/x86/guest/vmx_io.c
@@ -113,6 +113,7 @@ int32_t ept_violation_vmexit_handler(struct acrn_vcpu *vcpu)
 
 	/*caused by instruction fetch */
 	if ((exit_qual & 0x4UL) != 0UL) {
+		/* TODO: check whether the gpa is not an MMIO address. */
 		if (vcpu->arch.cur_context == NORMAL_WORLD) {
 			ept_modify_mr(vcpu->vm, (uint64_t *)vcpu->vm->arch_vm.nworld_eptp,
 				gpa & PAGE_MASK, PAGE_SIZE, EPT_EXE, 0UL);
diff --git a/hypervisor/arch/x86/page.c b/hypervisor/arch/x86/page.c
index 4fbc96397..6f932b540 100644
--- a/hypervisor/arch/x86/page.c
+++ b/hypervisor/arch/x86/page.c
@@ -88,7 +88,7 @@ void free_page(struct page_pool *pool, struct page *page)
 }
 
 /* @pre: The PPT and EPT have same page granularity */
-static inline bool large_page_support(enum _page_table_level level)
+static inline bool large_page_support(enum _page_table_level level, __unused uint64_t prot)
 {
 	bool support;
 
@@ -199,9 +199,20 @@ void *get_reserve_sworld_memory_base(void)
 	return post_uos_sworld_memory;
 }
 
-static inline bool large_page_not_support(__unused enum _page_table_level level)
+/*
+ * Pages without the execution right, such as MMIO, can always use large
+ * pages based on hardware capability, even if the VM is an RTVM. This
+ * reduces the number of page-table pages and improves the TLB hit rate.
+ */
+static inline bool use_large_page(enum _page_table_level level, uint64_t prot)
 {
-	return false;
+	bool ret = false;	/* for code pages */
+
+	if ((prot & EPT_EXE) == 0UL) {
+		ret = large_page_support(level, prot);
+	}
+
+	return ret;
 }
 
 static inline uint64_t ept_get_default_access_right(void)
@@ -263,9 +274,9 @@
 	if (is_ept_force_4k_ipage()) {
 		mem_ops->tweak_exe_right = ept_tweak_exe_right;
 		mem_ops->recover_exe_right = ept_recover_exe_right;
-		/* For RTVM, build 4KB page mapping in EPT */
+		/* For RTVM, build 4KB page mapping in EPT for code pages */
 		if (is_rt_vm(vm)) {
-			mem_ops->large_page_support = large_page_not_support;
+			mem_ops->large_page_support = use_large_page;
 		}
 	} else {
 		mem_ops->tweak_exe_right = nop_tweak_exe_right;
diff --git a/hypervisor/arch/x86/pagetable.c b/hypervisor/arch/x86/pagetable.c
index cf2b70ab3..f8cfcfc1d 100644
--- a/hypervisor/arch/x86/pagetable.c
+++ b/hypervisor/arch/x86/pagetable.c
@@ -321,7 +321,7 @@ static void add_pde(const uint64_t *pdpte, uint64_t paddr_start, uint64_t vaddr_
 		pr_fatal("%s, pde 0x%lx is already present!\n", __func__, vaddr);
 	} else {
 		if (mem_ops->pgentry_present(*pde) == 0UL) {
-			if (mem_ops->large_page_support(IA32E_PD) &&
+			if (mem_ops->large_page_support(IA32E_PD, prot) &&
 				mem_aligned_check(paddr, PDE_SIZE) &&
 				mem_aligned_check(vaddr, PDE_SIZE) &&
 				(vaddr_next <= vaddr_end)) {
@@ -370,7 +370,7 @@ static void add_pdpte(const uint64_t *pml4e, uint64_t paddr_start, uint64_t vadd
 		pr_fatal("%s, pdpte 0x%lx is already present!\n", __func__, vaddr);
 	} else {
 		if (mem_ops->pgentry_present(*pdpte) == 0UL) {
-			if (mem_ops->large_page_support(IA32E_PDPT) &&
+			if (mem_ops->large_page_support(IA32E_PDPT, prot) &&
 				mem_aligned_check(paddr, PDPTE_SIZE) &&
 				mem_aligned_check(vaddr, PDPTE_SIZE) &&
 				(vaddr_next <= vaddr_end)) {
diff --git a/hypervisor/include/arch/x86/page.h b/hypervisor/include/arch/x86/page.h
index 7b76a282c..1094fa30d 100644
--- a/hypervisor/include/arch/x86/page.h
+++ b/hypervisor/include/arch/x86/page.h
@@ -68,7 +68,7 @@ struct page_pool {
 
 struct memory_ops {
 	struct page_pool *pool;
-	bool (*large_page_support)(enum _page_table_level level);
+	bool (*large_page_support)(enum _page_table_level level, uint64_t prot);
 	uint64_t (*get_default_access_right)(void);
 	uint64_t (*pgentry_present)(uint64_t pte);
 	void (*clflush_pagewalk)(const void *p);
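
Note (illustration only, not part of the patch): the sketch below shows the decision the new use_large_page() callback is expected to make for an RTVM with forced 4KB instruction pages. EPT_EXE, the _page_table_level enum, and stub_large_page_support() are stand-ins for the ACRN definitions, chosen here only to make the example self-contained.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the ACRN EPT execute-access bit (assumed to be bit 2). */
#define EPT_EXE (1UL << 2)

enum _page_table_level { IA32E_PML4, IA32E_PDPT, IA32E_PD, IA32E_PT };

/* Stand-in for large_page_support(): pretend the hardware allows
 * 2MB (PD) and 1GB (PDPT) mappings. */
static bool stub_large_page_support(enum _page_table_level level, uint64_t prot)
{
	(void)prot;
	return (level == IA32E_PD) || (level == IA32E_PDPT);
}

/* Mirrors the patched logic: only mappings without the execute right
 * (e.g. MMIO) may use large pages; code pages stay at 4KB. */
static bool use_large_page(enum _page_table_level level, uint64_t prot)
{
	bool ret = false;	/* for code pages */

	if ((prot & EPT_EXE) == 0UL) {
		ret = stub_large_page_support(level, prot);
	}
	return ret;
}

int main(void)
{
	printf("non-exec (MMIO-like) PD mapping -> large page: %d\n",
	       use_large_page(IA32E_PD, 0UL));
	printf("exec (code) PD mapping          -> large page: %d\n",
	       use_large_page(IA32E_PD, EPT_EXE));
	return 0;
}

Compiled standalone, this prints 1 for the non-executable PD-level mapping and 0 for the executable one, matching the "build 4KB page mapping in EPT for code pages" comment added in init_ept_mem_ops().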