From f7f04ba67fe43f43657927160c01f77d7d413733 Mon Sep 17 00:00:00 2001
From: "Li, Fei1"
Date: Sun, 22 Jul 2018 15:01:02 +0800
Subject: [PATCH] hv: mmu: minor fix about hv mmu && ept modify

1. Fix some descriptions for hv mmu_modify.
2. Add a pml4_page input parameter to ept_mr_modify, to keep it aligned
   with ept_mr_add and ept_mr_del, which will support adding or deleting
   MRs for trusty.

Signed-off-by: Li, Fei1
Acked-by: Eddie Dong
---
 hypervisor/arch/x86/ept.c         |  9 ++---
 hypervisor/arch/x86/guest/guest.c | 14 +++-----
 hypervisor/arch/x86/mtrr.c        |  3 +-
 hypervisor/arch/x86/pagetable.c   | 57 ++++++++++++++++---------------
 hypervisor/include/arch/x86/mmu.h |  5 +--
 5 files changed, 43 insertions(+), 45 deletions(-)

diff --git a/hypervisor/arch/x86/ept.c b/hypervisor/arch/x86/ept.c
index fc0dfbc68..2c9418143 100644
--- a/hypervisor/arch/x86/ept.c
+++ b/hypervisor/arch/x86/ept.c
@@ -518,15 +518,16 @@ int ept_mr_add(struct vm *vm, uint64_t hpa,
 	return 0;
 }
 
-int ept_mr_modify(struct vm *vm, uint64_t gpa, uint64_t size,
-		uint64_t attr_set, uint64_t attr_clr)
+int ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
+		uint64_t gpa, uint64_t size,
+		uint64_t prot_set, uint64_t prot_clr)
 {
 	struct vcpu *vcpu;
 	uint16_t i;
 	int ret;
 
-	ret = mmu_modify((uint64_t *)vm->arch_vm.nworld_eptp,
-		gpa, size, attr_set, attr_clr, PTT_EPT);
+	ret = mmu_modify(pml4_page, gpa, size,
+		prot_set, prot_clr, PTT_EPT);
 
 	foreach_vcpu(i, vm, vcpu) {
 		vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
diff --git a/hypervisor/arch/x86/guest/guest.c b/hypervisor/arch/x86/guest/guest.c
index 8813b5796..c1d8d0e88 100644
--- a/hypervisor/arch/x86/guest/guest.c
+++ b/hypervisor/arch/x86/guest/guest.c
@@ -597,14 +597,7 @@ static void rebuild_vm0_e820(void)
 int prepare_vm0_memmap_and_e820(struct vm *vm)
 {
 	uint32_t i;
-	uint64_t attr_wb = (IA32E_EPT_R_BIT |
-				IA32E_EPT_W_BIT |
-				IA32E_EPT_X_BIT |
-				IA32E_EPT_WB);
-	uint64_t attr_uc = (IA32E_EPT_R_BIT |
-				IA32E_EPT_W_BIT |
-				IA32E_EPT_X_BIT |
-				IA32E_EPT_UNCACHED);
+	uint64_t attr_uc = (EPT_RWX | EPT_UNCACHED);
 	struct e820_entry *entry;
 	uint64_t hv_hpa;
 
@@ -622,8 +615,9 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
 	for (i = 0U; i < e820_entries; i++) {
 		entry = &e820[i];
 		if (entry->type == E820_TYPE_RAM) {
-			ept_mr_add(vm, entry->baseaddr, entry->baseaddr,
-					entry->length, attr_wb);
+			ept_mr_modify(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+					entry->baseaddr, entry->length,
+					EPT_WB, EPT_MT_MASK);
 		}
 	}
 
diff --git a/hypervisor/arch/x86/mtrr.c b/hypervisor/arch/x86/mtrr.c
index 377b957e6..4811d6ee3 100755
--- a/hypervisor/arch/x86/mtrr.c
+++ b/hypervisor/arch/x86/mtrr.c
@@ -145,7 +145,8 @@ static uint32_t update_ept(struct vm *vm, uint64_t start,
 		attr = IA32E_EPT_UNCACHED;
 	}
 
-	ept_mr_modify(vm, start, size, attr, IA32E_EPT_MT_MASK);
+	ept_mr_modify(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+			start, size, attr, IA32E_EPT_MT_MASK);
 
 	return attr;
 }
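Passing the EPT root explicitly is what makes the same helper reusable for the
trusty support mentioned in the commit message: the normal and secure worlds
keep separate PML4 pages, and the caller now decides which one a change applies
to. A minimal sketch of that calling pattern, assuming an arch_vm.sworld_eptp
field next to the nworld_eptp that this patch actually references:

/*
 * Sketch only: apply a write-back memory type to [gpa, gpa + size) in
 * whichever world's EPT the caller selects.  sworld_eptp is an assumed
 * field name; nworld_eptp, EPT_WB and EPT_MT_MASK appear in this patch.
 */
static void set_gpa_range_wb(struct vm *vm, bool secure_world,
		uint64_t gpa, uint64_t size)
{
	uint64_t *pml4_page = secure_world ?
		(uint64_t *)vm->arch_vm.sworld_eptp :
		(uint64_t *)vm->arch_vm.nworld_eptp;

	ept_mr_modify(vm, pml4_page, gpa, size, EPT_WB, EPT_MT_MASK);
}
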
diff --git a/hypervisor/arch/x86/pagetable.c b/hypervisor/arch/x86/pagetable.c
index 685953d96..6f52a2a96 100644
--- a/hypervisor/arch/x86/pagetable.c
+++ b/hypervisor/arch/x86/pagetable.c
@@ -56,45 +56,51 @@ static int split_large_page(uint64_t *pte,
 	return 0;
 }
 
+static inline void __modify_pte(uint64_t *pte,
+		uint64_t prot_set, uint64_t prot_clr)
+{
+	uint64_t new_pte = *pte;
+	new_pte &= ~prot_clr;
+	new_pte |= prot_set;
+	set_pte(pte, new_pte);
+}
+
 /*
- * In PT level, modify [vaddr_start, vaddr_end) MR PTA.
+ * In PT level,
+ * modify [vaddr_start, vaddr_end) memory type or page access right.
  */
 static int modify_pte(uint64_t *pde,
 		uint64_t vaddr_start, uint64_t vaddr_end,
 		uint64_t prot_set, uint64_t prot_clr,
 		enum _page_table_type ptt)
 {
-	uint64_t *pd_page = pde_page_vaddr(*pde);
+	uint64_t *pt_page = pde_page_vaddr(*pde);
 	uint64_t vaddr = vaddr_start;
 	uint64_t index = pte_index(vaddr);
 
 	dev_dbg(ACRN_DBG_MMU, "%s, vaddr: [0x%llx - 0x%llx]\n",
 		__func__, vaddr, vaddr_end);
 	for (; index < PTRS_PER_PTE; index++) {
-		uint64_t new_pte, *pte = pd_page + index;
-		uint64_t vaddr_next = (vaddr & PTE_MASK) + PTE_SIZE;
+		uint64_t *pte = pt_page + index;
 
 		if (pgentry_present(ptt, *pte) == 0UL) {
 			pr_err("%s, invalid op, pte not present\n", __func__);
 			return -EFAULT;
 		}
 
-		new_pte = *pte;
-		new_pte &= ~prot_clr;
-		new_pte |= prot_set;
-		set_pte(pte, new_pte);
-
-		if (vaddr_next >= vaddr_end) {
+		__modify_pte(pte, prot_set, prot_clr);
+		vaddr += PTE_SIZE;
+		if (vaddr >= vaddr_end) {
 			break;
 		}
-		vaddr = vaddr_next;
 	}
 
 	return 0;
 }
 
 /*
- * In PD level, modify [vaddr_start, vaddr_end) MR PTA.
+ * In PD level,
+ * modify [vaddr_start, vaddr_end) memory type or page access right.
  */
 static int modify_pde(uint64_t *pdpte,
 		uint64_t vaddr_start, uint64_t vaddr_end,
@@ -102,14 +108,14 @@ static int modify_pde(uint64_t *pdpte,
 		enum _page_table_type ptt)
 {
 	int ret = 0;
-	uint64_t *pdpt_page = pdpte_page_vaddr(*pdpte);
+	uint64_t *pd_page = pdpte_page_vaddr(*pdpte);
 	uint64_t vaddr = vaddr_start;
 	uint64_t index = pde_index(vaddr);
 
 	dev_dbg(ACRN_DBG_MMU, "%s, vaddr: [0x%llx - 0x%llx]\n",
 		__func__, vaddr, vaddr_end);
 	for (; index < PTRS_PER_PDE; index++) {
-		uint64_t *pde = pdpt_page + index;
+		uint64_t *pde = pd_page + index;
 		uint64_t vaddr_next = (vaddr & PDE_MASK) + PDE_SIZE;
 
 		if (pgentry_present(ptt, *pde) == 0UL) {
@@ -123,10 +129,7 @@ static int modify_pde(uint64_t *pdpte,
 				return ret;
 			}
 		} else {
-			uint64_t new_pde = *pde;
-			new_pde &= ~prot_clr;
-			new_pde |= prot_set;
-			set_pte(pde, new_pde);
+			__modify_pte(pde, prot_set, prot_clr);
 			if (vaddr_next < vaddr_end) {
 				vaddr = vaddr_next;
 				continue;
@@ -146,7 +149,8 @@ static int modify_pde(uint64_t *pdpte,
 }
 
 /*
- * In PDPT level, modify [vaddr, vaddr_end) MR PTA.
+ * In PDPT level,
+ * modify [vaddr_start, vaddr_end) memory type or page access right.
  */
 static int modify_pdpte(uint64_t *pml4e,
 		uint64_t vaddr_start, uint64_t vaddr_end,
@@ -154,14 +158,14 @@ static int modify_pdpte(uint64_t *pml4e,
 		enum _page_table_type ptt)
 {
 	int ret = 0;
-	uint64_t *pml4_page = pml4e_page_vaddr(*pml4e);
+	uint64_t *pdpt_page = pml4e_page_vaddr(*pml4e);
 	uint64_t vaddr = vaddr_start;
 	uint64_t index = pdpte_index(vaddr);
 
 	dev_dbg(ACRN_DBG_MMU, "%s, vaddr: [0x%llx - 0x%llx]\n",
 		__func__, vaddr, vaddr_end);
 	for (; index < PTRS_PER_PDPTE; index++) {
-		uint64_t *pdpte = pml4_page + index;
+		uint64_t *pdpte = pdpt_page + index;
 		uint64_t vaddr_next = (vaddr & PDPTE_MASK) + PDPTE_SIZE;
 
 		if (pgentry_present(ptt, *pdpte) == 0UL) {
@@ -175,10 +179,7 @@ static int modify_pdpte(uint64_t *pml4e,
 				return ret;
 			}
 		} else {
-			uint64_t new_pdpte = *pdpte;
-			new_pdpte &= ~prot_clr;
-			new_pdpte |= prot_set;
-			set_pte(pdpte, new_pdpte);
+			__modify_pte(pdpte, prot_set, prot_clr);
 			if (vaddr_next < vaddr_end) {
 				vaddr = vaddr_next;
 				continue;
@@ -198,9 +199,9 @@ static int modify_pdpte(uint64_t *pml4e,
 }
 
 /*
- * modify [vaddr, vaddr + size ) memory region page table attributes.
- * prot_clr - attributes want to be clear
- * prot_set - attributes want to be set
+ * modify [vaddr, vaddr + size ) memory type or page access right.
+ * prot_clr - memory type or page access right want to be clear
+ * prot_set - memory type or page access right want to be set
  * @pre: the prot_set and prot_clr should set before call this function.
  * If you just want to modify access rights, you can just set the prot_clr
  * to what you want to set, prot_clr to what you want to clear. But if you
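All three levels now funnel their entry updates through __modify_pte, which
applies one rule: clear the bits in prot_clr, then set the bits in prot_set,
i.e. new = (old & ~prot_clr) | prot_set. The standalone snippet below walks
through that arithmetic for an entry whose memory type changes from uncached
to write-back; the bit positions mirror the VT-x EPT memory-type field
(bits 5:3) and are defined locally only so the example compiles on its own,
independent of the ACRN headers.

#include <stdint.h>
#include <stdio.h>

/* Local copies of the relevant EPT bit layout so this builds standalone. */
#define EPT_RWX       0x7UL                 /* read/write/execute, bits 2:0 */
#define EPT_MT_SHIFT  3U
#define EPT_UNCACHED  (0UL << EPT_MT_SHIFT) /* memory type UC */
#define EPT_WB        (6UL << EPT_MT_SHIFT) /* memory type WB */
#define EPT_MT_MASK   (7UL << EPT_MT_SHIFT) /* memory type field, bits 5:3 */

/* Same update rule as __modify_pte: clear prot_clr, then set prot_set. */
static uint64_t modify_entry(uint64_t old, uint64_t prot_set, uint64_t prot_clr)
{
	return (old & ~prot_clr) | prot_set;
}

int main(void)
{
	uint64_t pte = 0x100000UL | EPT_RWX | EPT_UNCACHED; /* UC, RWX, some PFN */

	/* Switch the memory type to WB without touching RWX or the PFN. */
	pte = modify_entry(pte, EPT_WB, EPT_MT_MASK);

	printf("new entry: 0x%llx\n", (unsigned long long)pte); /* 0x100037 */
	return 0;
}
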
diff --git a/hypervisor/include/arch/x86/mmu.h b/hypervisor/include/arch/x86/mmu.h
index c9a0d9850..39506a8ac 100644
--- a/hypervisor/include/arch/x86/mmu.h
+++ b/hypervisor/include/arch/x86/mmu.h
@@ -400,8 +400,9 @@ uint64_t _gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size);
 uint64_t hpa2gpa(struct vm *vm, uint64_t hpa);
 int ept_mr_add(struct vm *vm, uint64_t hpa, uint64_t gpa,
 		uint64_t size, uint32_t prot);
-int ept_mr_modify(struct vm *vm, uint64_t gpa, uint64_t size,
-		uint64_t attr_set, uint64_t attr_clr);
+int ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
+		uint64_t gpa, uint64_t size,
+		uint64_t prot_set, uint64_t prot_clr);
 int ept_mr_del(struct vm *vm, uint64_t hpa, uint64_t gpa,
 		uint64_t size);
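Per the @pre comment kept in pagetable.c, a caller that only changes access
rights puts the bits to grant in prot_set and the bits to drop in prot_clr,
with no memory-type bits involved. A minimal sketch of that pattern against
the updated prototype; IA32E_EPT_W_BIT is borrowed from the guest.c code this
patch removes, so treat that bit name as an assumption rather than the final
macro the series ends up with.

/*
 * Sketch: drop write permission on a guest physical range using the new
 * ept_mr_modify prototype.
 */
void ept_write_protect(struct vm *vm, uint64_t gpa, uint64_t size)
{
	/* Nothing to set, only the write bit to clear. */
	ept_mr_modify(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
			gpa, size, 0UL, IA32E_EPT_W_BIT);

	/*
	 * No explicit TLB maintenance is needed here: ept_mr_modify already
	 * raises ACRN_REQUEST_EPT_FLUSH on every vcpu of the VM.
	 */
}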