hv: Enable accessed bit in EPT paging

Also clear all the accessed bits in the page tables when the VM does
wbinvd, so that the next wbinvd can benefit from it.

Tracked-On: #4703
Signed-off-by: Shuo A Liu <shuo.a.liu@intel.com>
Shuo A Liu 2020-04-20 14:48:35 +08:00 committed by wenlingz
parent c72d1936a4
commit a6ea34bc62
7 changed files with 36 additions and 9 deletions
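The idea, as a minimal self-contained sketch (toy code, not the hypervisor sources; the bit layout follows the patch, everything else is made up): with the accessed bit enabled in EPT, the wbinvd emulation only needs to flush guest pages whose leaf entries the hardware has marked accessed, and clearing those bits after the flush means the next wbinvd only revisits pages the guest touched in between.

#include <stdint.h>
#include <stdio.h>

/* Toy model of the accessed-bit optimization: EPT_RWX-style present bits in
 * bits 2:0, the accessed flag in bit 8, as in the patch below. */
#define TOY_RWX      (7UL << 0U)
#define TOY_ACCESSED (1UL << 8U)

static void flush_leaf(uint64_t *pge)
{
	/* stand-in for ept_flush_leaf_page(): flush the backing memory,
	 * then clear the accessed bit so the next walk skips this page */
	printf("flush entry holding 0x%lx\n", (unsigned long)*pge);
	*pge &= ~TOY_ACCESSED;
}

int main(void)
{
	uint64_t ept_leaves[3] = {
		TOY_RWX | TOY_ACCESSED,	/* touched by the guest -> flushed */
		TOY_RWX,		/* mapped but never accessed -> skipped */
		0UL,			/* not present -> skipped */
	};

	for (int i = 0; i < 3; i++) {
		if (((ept_leaves[i] & TOY_RWX) != 0UL) &&
		    ((ept_leaves[i] & TOY_ACCESSED) != 0UL)) {
			flush_leaf(&ept_leaves[i]);
		}
	}
	return 0;
}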

View File

@@ -180,6 +180,11 @@ void ept_del_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa, uint64_t
}
}
void ept_clear_accessed(uint64_t *pge, __unused uint64_t size)
{
*pge &= ~EPT_ACCESSED;
}
/**
* @pre pge != NULL && size > 0.
*/
@@ -195,6 +200,7 @@ void ept_flush_leaf_page(uint64_t *pge, uint64_t size)
flush_address_space(hva, size);
clac();
}
ept_clear_accessed(pge, size);
}
/**
@@ -217,7 +223,7 @@ void *get_ept_entry(struct acrn_vm *vm)
/**
* @pre vm != NULL && cb != NULL.
*/
void walk_ept_table(struct acrn_vm *vm, pge_handler cb)
void walk_ept_table(struct acrn_vm *vm, pge_handler cb, bool only_accessed)
{
const struct memory_ops *mem_ops = &vm->arch_vm.ept_mem_ops;
uint64_t *pml4e, *pdpte, *pde, *pte;
@@ -225,12 +231,14 @@ void walk_ept_table(struct acrn_vm *vm, pge_handler cb)
for (i = 0UL; i < PTRS_PER_PML4E; i++) {
pml4e = pml4e_offset((uint64_t *)get_ept_entry(vm), i << PML4E_SHIFT);
if (mem_ops->pgentry_present(*pml4e) == 0UL) {
if (mem_ops->pgentry_present(*pml4e) == 0UL ||
(only_accessed && mem_ops->pgentry_accessed(*pml4e) == 0UL)) {
continue;
}
for (j = 0UL; j < PTRS_PER_PDPTE; j++) {
pdpte = pdpte_offset(pml4e, j << PDPTE_SHIFT);
if (mem_ops->pgentry_present(*pdpte) == 0UL) {
if (mem_ops->pgentry_present(*pdpte) == 0UL ||
(only_accessed && mem_ops->pgentry_accessed(*pdpte) == 0UL)) {
continue;
}
if (pdpte_large(*pdpte) != 0UL) {
@@ -239,7 +247,8 @@ void walk_ept_table(struct acrn_vm *vm, pge_handler cb)
}
for (k = 0UL; k < PTRS_PER_PDE; k++) {
pde = pde_offset(pdpte, k << PDE_SHIFT);
if (mem_ops->pgentry_present(*pde) == 0UL) {
if (mem_ops->pgentry_present(*pde) == 0UL ||
(only_accessed && mem_ops->pgentry_accessed(*pde) == 0UL)) {
continue;
}
if (pde_large(*pde) != 0UL) {
@@ -248,7 +257,8 @@ void walk_ept_table(struct acrn_vm *vm, pge_handler cb)
}
for (m = 0UL; m < PTRS_PER_PTE; m++) {
pte = pte_offset(pde, m << PTE_SHIFT);
if (mem_ops->pgentry_present(*pte) != 0UL) {
if ((mem_ops->pgentry_present(*pte) != 0UL) &&
(!only_accessed || (mem_ops->pgentry_accessed(*pte) != 0UL))) {
cb(pte, PTE_SIZE);
}
}

View File

@@ -360,7 +360,7 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
* TODO: introduce API to make this data driven based
* on VMX_EPT_VPID_CAP
*/
value64 = hva2hpa(vm->arch_vm.nworld_eptp) | (3UL << 3U) | 6UL;
value64 = hva2hpa(vm->arch_vm.nworld_eptp) | (3UL << 3U) | 6UL | (1UL << 6U);
exec_vmwrite64(VMX_EPT_POINTER_FULL, value64);
pr_dbg("VMX_EPT_POINTER: 0x%016lx ", value64);
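For context, the hard-coded EPTP bits on this line follow the Intel SDM layout: bits 2:0 select the EPT paging-structure memory type (6 = write-back), bits 5:3 hold the page-walk length minus one (3 = a 4-level EPT), and bit 6, newly set here, tells the CPU to maintain the accessed and dirty flags in EPT entries. A sketch with illustrative macro names (these are not existing ACRN macros):

#define EPTP_MT_WB        6UL           /* bits 2:0 - EPT memory type: write-back */
#define EPTP_4LEVEL_WALK  (3UL << 3U)   /* bits 5:3 - page-walk length minus one */
#define EPTP_ENABLE_AD    (1UL << 6U)   /* bit 6   - enable accessed/dirty flags */

value64 = hva2hpa(vm->arch_vm.nworld_eptp) | EPTP_4LEVEL_WALK | EPTP_MT_WB | EPTP_ENABLE_AD;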

View File

@@ -379,10 +379,10 @@ static int32_t xsetbv_vmexit_handler(struct acrn_vcpu *vcpu)
static int32_t wbinvd_vmexit_handler(struct acrn_vcpu *vcpu)
{
if (has_rt_vm() == false) {
if (has_rt_vm() == false || is_rt_vm(vcpu->vm)) {
cache_flush_invalidate_all();
} else {
walk_ept_table(vcpu->vm, ept_flush_leaf_page);
walk_ept_table(vcpu->vm, ept_flush_leaf_page, true);
}
return 0;

View File

@@ -37,6 +37,11 @@ static inline void ppt_clflush_pagewalk(const void* entry __attribute__((unused)
{
}
static inline uint64_t ppt_pgentry_accessed(uint64_t pte)
{
return pte & PAGE_ACCESSED;
}
static inline uint64_t ppt_pgentry_present(uint64_t pte)
{
return pte & PAGE_PRESENT;
@@ -71,6 +76,7 @@ const struct memory_ops ppt_mem_ops = {
.large_page_enabled = true,
.get_default_access_right = ppt_get_default_access_right,
.pgentry_present = ppt_pgentry_present,
.pgentry_accessed = ppt_pgentry_accessed,
.get_pml4_page = ppt_get_pml4_page,
.get_pdpt_page = ppt_get_pdpt_page,
.get_pd_page = ppt_get_pd_page,
@@ -147,6 +153,11 @@ static inline uint64_t ept_pgentry_present(uint64_t pte)
return pte & EPT_RWX;
}
static inline uint64_t ept_pgentry_accessed(uint64_t pte)
{
return pte & EPT_ACCESSED;
}
static inline void ept_clflush_pagewalk(const void* etry)
{
iommu_flush_cache(etry, sizeof(uint64_t));
@@ -250,6 +261,7 @@ void init_ept_mem_ops(struct memory_ops *mem_ops, uint16_t vm_id)
mem_ops->info = &ept_pages_info[vm_id];
mem_ops->get_default_access_right = ept_get_default_access_right;
mem_ops->pgentry_present = ept_pgentry_present;
mem_ops->pgentry_accessed = ept_pgentry_accessed;
mem_ops->get_pml4_page = ept_get_pml4_page;
mem_ops->get_pdpt_page = ept_get_pdpt_page;
mem_ops->get_pd_page = ept_get_pd_page;
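The accessed-bit check is routed through struct memory_ops because the same walker code serves both the hypervisor's own page tables and EPT, and the accessed flag sits at a different bit position in the two formats (bit 5 for ordinary IA-32e paging entries, bit 8 for EPT entries). A stand-alone toy illustration of that dispatch (the struct and names are simplified, not the ACRN definitions):

#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_ACCESSED (1UL << 5U)   /* accessed flag in ordinary paging entries */
#define TOY_EPT_ACCESSED  (1UL << 8U)   /* accessed flag in EPT entries */

struct toy_mem_ops {
	uint64_t (*pgentry_accessed)(uint64_t pte);
};

static uint64_t ppt_accessed(uint64_t pte) { return pte & TOY_PAGE_ACCESSED; }
static uint64_t ept_accessed(uint64_t pte) { return pte & TOY_EPT_ACCESSED; }

int main(void)
{
	const struct toy_mem_ops ppt_ops = { ppt_accessed };
	const struct toy_mem_ops ept_ops = { ept_accessed };
	uint64_t entry = 1UL << 8U;	/* only the EPT-style accessed bit is set */

	printf("as a PPT entry: %s\n", (ppt_ops.pgentry_accessed(entry) != 0UL) ? "accessed" : "clean");
	printf("as an EPT entry: %s\n", (ept_ops.pgentry_accessed(entry) != 0UL) ? "accessed" : "clean");
	return 0;
}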

View File

@@ -127,6 +127,7 @@ void ept_del_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa,
* @return None
*/
void ept_flush_leaf_page(uint64_t *pge, uint64_t size);
void ept_clear_accessed(uint64_t *pge, uint64_t size);
/**
* @brief Get EPT pointer of the vm
@@ -145,10 +146,11 @@ void *get_ept_entry(struct acrn_vm *vm);
* @param[in] cb the pointer that points to walk_ept_table callback, the callback
* will be invoked when getting a present page entry from EPT, and
* the callback could get the page entry and page size parameters.
* @param[in] only_accessed if true, walk only the entries whose accessed bit is set
*
* @return None
*/
void walk_ept_table(struct acrn_vm *vm, pge_handler cb);
void walk_ept_table(struct acrn_vm *vm, pge_handler cb, bool only_accessed);
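For reference, the two call styles look like this; only the first call is part of this change, the second is a hypothetical illustration of passing false:

/* visit only present leaf entries whose accessed bit is set (wbinvd handler) */
walk_ept_table(vcpu->vm, ept_flush_leaf_page, true);

/* hypothetical: visit every present leaf entry regardless of the accessed bit */
walk_ept_table(vcpu->vm, ept_clear_accessed, false);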
/**
* @brief EPT misconfiguration handling

View File

@@ -80,6 +80,7 @@ struct memory_ops {
bool large_page_enabled;
uint64_t (*get_default_access_right)(void);
uint64_t (*pgentry_present)(uint64_t pte);
uint64_t (*pgentry_accessed)(uint64_t pte);
struct page *(*get_pml4_page)(const union pgtable_pages_info *info);
struct page *(*get_pdpt_page)(const union pgtable_pages_info *info, uint64_t gpa);
struct page *(*get_pd_page)(const union pgtable_pages_info *info, uint64_t gpa);

View File

@@ -107,6 +107,8 @@
*/
/* End of ept_mem_type */
#define EPT_ACCESSED (1UL << 8U)
#define EPT_MT_MASK (7UL << EPT_MT_SHIFT)
/* VTD: Second-Level Paging Entries: Snoop Control */
#define EPT_SNOOP_CTRL (1UL << 11U)