diff --git a/hypervisor/arch/x86/guest/ept.c b/hypervisor/arch/x86/guest/ept.c
index 023d5b108..7d78dad41 100644
--- a/hypervisor/arch/x86/guest/ept.c
+++ b/hypervisor/arch/x86/guest/ept.c
@@ -180,6 +180,11 @@ void ept_del_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa, uint64_t
 	}
 }
 
+void ept_clear_accessed(uint64_t *pge, __unused uint64_t size)
+{
+	*pge &= ~EPT_ACCESSED;
+}
+
 /**
  * @pre pge != NULL && size > 0.
  */
@@ -195,6 +200,7 @@ void ept_flush_leaf_page(uint64_t *pge, uint64_t size)
 		flush_address_space(hva, size);
 		clac();
 	}
+	ept_clear_accessed(pge, size);
 }
 
 /**
@@ -217,7 +223,7 @@ void *get_ept_entry(struct acrn_vm *vm)
 /**
  * @pre vm != NULL && cb != NULL.
  */
-void walk_ept_table(struct acrn_vm *vm, pge_handler cb)
+void walk_ept_table(struct acrn_vm *vm, pge_handler cb, bool only_accessed)
 {
 	const struct memory_ops *mem_ops = &vm->arch_vm.ept_mem_ops;
 	uint64_t *pml4e, *pdpte, *pde, *pte;
@@ -225,12 +231,14 @@ void walk_ept_table(struct acrn_vm *vm, pge_handler cb)
 
 	for (i = 0UL; i < PTRS_PER_PML4E; i++) {
 		pml4e = pml4e_offset((uint64_t *)get_ept_entry(vm), i << PML4E_SHIFT);
-		if (mem_ops->pgentry_present(*pml4e) == 0UL) {
+		if (mem_ops->pgentry_present(*pml4e) == 0UL ||
+			(only_accessed && mem_ops->pgentry_accessed(*pml4e) == 0UL)) {
 			continue;
 		}
 		for (j = 0UL; j < PTRS_PER_PDPTE; j++) {
 			pdpte = pdpte_offset(pml4e, j << PDPTE_SHIFT);
-			if (mem_ops->pgentry_present(*pdpte) == 0UL) {
+			if (mem_ops->pgentry_present(*pdpte) == 0UL ||
+				(only_accessed && mem_ops->pgentry_accessed(*pdpte) == 0UL)) {
 				continue;
 			}
 			if (pdpte_large(*pdpte) != 0UL) {
@@ -239,7 +247,8 @@ void walk_ept_table(struct acrn_vm *vm, pge_handler cb)
 			}
 			for (k = 0UL; k < PTRS_PER_PDE; k++) {
 				pde = pde_offset(pdpte, k << PDE_SHIFT);
-				if (mem_ops->pgentry_present(*pde) == 0UL) {
+				if (mem_ops->pgentry_present(*pde) == 0UL ||
+					(only_accessed && mem_ops->pgentry_accessed(*pde) == 0UL)) {
 					continue;
 				}
 				if (pde_large(*pde) != 0UL) {
@@ -248,7 +257,8 @@ void walk_ept_table(struct acrn_vm *vm, pge_handler cb)
 				}
 				for (m = 0UL; m < PTRS_PER_PTE; m++) {
 					pte = pte_offset(pde, m << PTE_SHIFT);
-					if (mem_ops->pgentry_present(*pte) != 0UL) {
+					if ((mem_ops->pgentry_present(*pte) != 0UL) &&
+						(!only_accessed || (mem_ops->pgentry_accessed(*pte) != 0UL))) {
 						cb(pte, PTE_SIZE);
 					}
 				}
diff --git a/hypervisor/arch/x86/guest/vmcs.c b/hypervisor/arch/x86/guest/vmcs.c
index f927cd4b2..f8c734272 100644
--- a/hypervisor/arch/x86/guest/vmcs.c
+++ b/hypervisor/arch/x86/guest/vmcs.c
@@ -360,7 +360,7 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
 	 * TODO: introduce API to make this data driven based
 	 * on VMX_EPT_VPID_CAP
 	 */
-	value64 = hva2hpa(vm->arch_vm.nworld_eptp) | (3UL << 3U) | 6UL;
+	value64 = hva2hpa(vm->arch_vm.nworld_eptp) | (3UL << 3U) | 6UL | (1UL << 6U);
 	exec_vmwrite64(VMX_EPT_POINTER_FULL, value64);
 	pr_dbg("VMX_EPT_POINTER: 0x%016lx ", value64);
 
diff --git a/hypervisor/arch/x86/guest/vmexit.c b/hypervisor/arch/x86/guest/vmexit.c
index 20bc51aa4..02a85bbf9 100644
--- a/hypervisor/arch/x86/guest/vmexit.c
+++ b/hypervisor/arch/x86/guest/vmexit.c
@@ -379,10 +379,10 @@ static int32_t xsetbv_vmexit_handler(struct acrn_vcpu *vcpu)
 
 static int32_t wbinvd_vmexit_handler(struct acrn_vcpu *vcpu)
 {
-	if (has_rt_vm() == false) {
+	if (has_rt_vm() == false || is_rt_vm(vcpu->vm)) {
 		cache_flush_invalidate_all();
 	} else {
-		walk_ept_table(vcpu->vm, ept_flush_leaf_page);
+		walk_ept_table(vcpu->vm, ept_flush_leaf_page, true);
 	}
 
 	return 0;
diff --git a/hypervisor/arch/x86/page.c b/hypervisor/arch/x86/page.c
index 51a7e0ec2..0b1bacaf4 100644
--- a/hypervisor/arch/x86/page.c
+++ b/hypervisor/arch/x86/page.c
@@ -37,6 +37,11 @@ static inline void ppt_clflush_pagewalk(const void* entry __attribute__((unused)
 {
 }
 
+static inline uint64_t ppt_pgentry_accessed(uint64_t pte)
+{
+	return pte & PAGE_ACCESSED;
+}
+
 static inline uint64_t ppt_pgentry_present(uint64_t pte)
 {
 	return pte & PAGE_PRESENT;
@@ -71,6 +76,7 @@ const struct memory_ops ppt_mem_ops = {
 	.large_page_enabled = true,
 	.get_default_access_right = ppt_get_default_access_right,
 	.pgentry_present = ppt_pgentry_present,
+	.pgentry_accessed = ppt_pgentry_accessed,
 	.get_pml4_page = ppt_get_pml4_page,
 	.get_pdpt_page = ppt_get_pdpt_page,
 	.get_pd_page = ppt_get_pd_page,
@@ -147,6 +153,11 @@ static inline uint64_t ept_pgentry_present(uint64_t pte)
 	return pte & EPT_RWX;
 }
 
+static inline uint64_t ept_pgentry_accessed(uint64_t pte)
+{
+	return pte & EPT_ACCESSED;
+}
+
 static inline void ept_clflush_pagewalk(const void* etry)
 {
 	iommu_flush_cache(etry, sizeof(uint64_t));
@@ -250,6 +261,7 @@ void init_ept_mem_ops(struct memory_ops *mem_ops, uint16_t vm_id)
 	mem_ops->info = &ept_pages_info[vm_id];
 	mem_ops->get_default_access_right = ept_get_default_access_right;
 	mem_ops->pgentry_present = ept_pgentry_present;
+	mem_ops->pgentry_accessed = ept_pgentry_accessed;
 	mem_ops->get_pml4_page = ept_get_pml4_page;
 	mem_ops->get_pdpt_page = ept_get_pdpt_page;
 	mem_ops->get_pd_page = ept_get_pd_page;
diff --git a/hypervisor/include/arch/x86/guest/ept.h b/hypervisor/include/arch/x86/guest/ept.h
index f72a8ed31..674ba3581 100644
--- a/hypervisor/include/arch/x86/guest/ept.h
+++ b/hypervisor/include/arch/x86/guest/ept.h
@@ -127,6 +127,7 @@ void ept_del_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa,
  * @return None
  */
 void ept_flush_leaf_page(uint64_t *pge, uint64_t size);
+void ept_clear_accessed(uint64_t *pge, uint64_t size);
 
 /**
  * @brief Get EPT pointer of the vm
@@ -145,10 +146,11 @@ void *get_ept_entry(struct acrn_vm *vm);
  * @param[in] cb the pointer that points to walk_ept_table callback, the callback
  *             will be invoked when getting a present page entry from EPT, and
  *             the callback could get the page entry and page size parameters.
+ * @param[in] only_accessed if true, only walk EPT entries whose accessed bit is set
  *
  * @return None
  */
-void walk_ept_table(struct acrn_vm *vm, pge_handler cb);
+void walk_ept_table(struct acrn_vm *vm, pge_handler cb, bool only_accessed);
 
 /**
  * @brief EPT misconfiguration handling
diff --git a/hypervisor/include/arch/x86/page.h b/hypervisor/include/arch/x86/page.h
index bd8e791fd..51c994078 100644
--- a/hypervisor/include/arch/x86/page.h
+++ b/hypervisor/include/arch/x86/page.h
@@ -80,6 +80,7 @@ struct memory_ops {
 	bool large_page_enabled;
 	uint64_t (*get_default_access_right)(void);
 	uint64_t (*pgentry_present)(uint64_t pte);
+	uint64_t (*pgentry_accessed)(uint64_t pte);
 	struct page *(*get_pml4_page)(const union pgtable_pages_info *info);
 	struct page *(*get_pdpt_page)(const union pgtable_pages_info *info, uint64_t gpa);
 	struct page *(*get_pd_page)(const union pgtable_pages_info *info, uint64_t gpa);
diff --git a/hypervisor/include/arch/x86/pgtable.h b/hypervisor/include/arch/x86/pgtable.h
index 37c9ba17a..131b72181 100644
--- a/hypervisor/include/arch/x86/pgtable.h
+++ b/hypervisor/include/arch/x86/pgtable.h
@@ -107,6 +107,8 @@
  */
 /* End of ept_mem_type */
 
+#define EPT_ACCESSED		(1UL << 8U)
+
 #define EPT_MT_MASK		(7UL << EPT_MT_SHIFT)
 /* VTD: Second-Level Paging Entries: Snoop Control */
 #define EPT_SNOOP_CTRL		(1UL << 11U)
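
For reference, the standalone sketch below is not ACRN code: PTE_PRESENT/PTE_ACCESSED, flush_range() and walk_leaf_entries() are simplified stand-ins for the patch's pgentry_present/pgentry_accessed hooks, ept_flush_leaf_page() and walk_ept_table(). It illustrates the flush pattern the patch introduces for wbinvd emulation: once accessed-flag tracking is enabled (the EPTP bit 6 change in vmcs.c), the walk visits only leaf entries the guest actually touched, flushes the backing range, and clears the accessed bit so the next walk covers only pages touched since the previous flush.

/* Standalone sketch of the accessed-bit flush pattern; bit layout and helpers
 * are simplified stand-ins, not the ACRN implementation. */
#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_PRESENT   (1UL << 0)
#define PTE_ACCESSED  (1UL << 8)   /* same bit position the patch defines as EPT_ACCESSED */
#define PAGE_SIZE_4K  4096UL

/* Placeholder for a CLFLUSH-style range flush (ept_flush_leaf_page in the patch). */
static void flush_range(uint64_t pa, uint64_t size)
{
	printf("flush 0x%" PRIx64 " .. 0x%" PRIx64 "\n", pa, pa + size - 1UL);
}

/* Walk leaf entries; with only_accessed set, entries the guest never touched are
 * skipped, and each flushed entry has its accessed bit cleared afterwards. */
static void walk_leaf_entries(uint64_t *table, size_t n, bool only_accessed)
{
	for (size_t i = 0; i < n; i++) {
		if ((table[i] & PTE_PRESENT) == 0UL) {
			continue;
		}
		if (only_accessed && ((table[i] & PTE_ACCESSED) == 0UL)) {
			continue;
		}
		flush_range(table[i] & ~(PAGE_SIZE_4K - 1UL), PAGE_SIZE_4K);
		table[i] &= ~PTE_ACCESSED;
	}
}

int main(void)
{
	uint64_t pt[4] = {
		0x1000UL | PTE_PRESENT | PTE_ACCESSED,  /* present and touched: flushed */
		0x2000UL | PTE_PRESENT,                 /* present, never touched: skipped */
		0UL,                                    /* not present: skipped */
		0x4000UL | PTE_PRESENT | PTE_ACCESSED,  /* present and touched: flushed */
	};

	walk_leaf_entries(pt, 4, true);
	walk_leaf_entries(pt, 4, true);  /* second pass flushes nothing: accessed bits were cleared */
	return 0;
}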