hv: mmu: minor fix about hv mmu && ept modify

1. Fix some descriptions for hv mmu_modify.
2. Add a pml4_page input parameter to ept_mr_modify to keep it aligned with
ept_mr_add and ept_mr_del, which will support adding or deleting memory regions for trusty.

Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Author: Li, Fei1 <fei1.li@intel.com>
Date: 2018-07-22 15:01:02 +08:00
Committed by: lijinxia
Parent: 502e3e2e65
Commit: f7f04ba67f
5 changed files with 43 additions and 45 deletions
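For illustration only (not part of this commit's diff): with the extra pml4_page argument, a caller now names the EPT root it wants to modify instead of always operating on the normal-world EPT. The first call below mirrors the vm0 e820 hunk further down; the secure-world variant, and the vm->arch_vm.sworld_eptp field it dereferences, are assumptions added here to show the intent behind the new parameter.

	/* Normal world: switch vm0 RAM ranges to write-back, as in the
	 * prepare_vm0_memmap_and_e820() hunk below. */
	ept_mr_modify(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
			entry->baseaddr, entry->length, EPT_WB, EPT_MT_MASK);

	/* Hypothetical secure-world (trusty) caller: the same helper can now
	 * target a different EPT root; sworld_eptp is assumed for illustration. */
	ept_mr_modify(vm, (uint64_t *)vm->arch_vm.sworld_eptp,
			gpa, size, EPT_WB, EPT_MT_MASK);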


@@ -518,15 +518,16 @@ int ept_mr_add(struct vm *vm, uint64_t hpa,
 	return 0;
 }
 
-int ept_mr_modify(struct vm *vm, uint64_t gpa, uint64_t size,
-		uint64_t attr_set, uint64_t attr_clr)
+int ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
+		uint64_t gpa, uint64_t size,
+		uint64_t prot_set, uint64_t prot_clr)
 {
 	struct vcpu *vcpu;
 	uint16_t i;
 	int ret;
 
-	ret = mmu_modify((uint64_t *)vm->arch_vm.nworld_eptp,
-			gpa, size, attr_set, attr_clr, PTT_EPT);
+	ret = mmu_modify(pml4_page, gpa, size,
+			prot_set, prot_clr, PTT_EPT);
 
 	foreach_vcpu(i, vm, vcpu) {
 		vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);


@@ -597,14 +597,7 @@ static void rebuild_vm0_e820(void)
 int prepare_vm0_memmap_and_e820(struct vm *vm)
 {
 	uint32_t i;
-	uint64_t attr_wb = (IA32E_EPT_R_BIT |
-			IA32E_EPT_W_BIT |
-			IA32E_EPT_X_BIT |
-			IA32E_EPT_WB);
-	uint64_t attr_uc = (IA32E_EPT_R_BIT |
-			IA32E_EPT_W_BIT |
-			IA32E_EPT_X_BIT |
-			IA32E_EPT_UNCACHED);
+	uint64_t attr_uc = (EPT_RWX | EPT_UNCACHED);
 	struct e820_entry *entry;
 	uint64_t hv_hpa;
 
@@ -622,8 +615,9 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
 	for (i = 0U; i < e820_entries; i++) {
 		entry = &e820[i];
 		if (entry->type == E820_TYPE_RAM) {
-			ept_mr_add(vm, entry->baseaddr, entry->baseaddr,
-				entry->length, attr_wb);
+			ept_mr_modify(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+				entry->baseaddr, entry->length,
+				EPT_WB, EPT_MT_MASK);
 		}
 	}


@@ -145,7 +145,8 @@ static uint32_t update_ept(struct vm *vm, uint64_t start,
 		attr = IA32E_EPT_UNCACHED;
 	}
 
-	ept_mr_modify(vm, start, size, attr, IA32E_EPT_MT_MASK);
+	ept_mr_modify(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+			start, size, attr, IA32E_EPT_MT_MASK);
 
 	return attr;
 }


@@ -56,45 +56,51 @@ static int split_large_page(uint64_t *pte,
 	return 0;
 }
 
+static inline void __modify_pte(uint64_t *pte,
+		uint64_t prot_set, uint64_t prot_clr)
+{
+	uint64_t new_pte = *pte;
+	new_pte &= ~prot_clr;
+	new_pte |= prot_set;
+	set_pte(pte, new_pte);
+}
+
 /*
- * In PT level, modify [vaddr_start, vaddr_end) MR PTA.
+ * In PT level,
+ * modify [vaddr_start, vaddr_end) memory type or page access right.
  */
 static int modify_pte(uint64_t *pde,
 		uint64_t vaddr_start, uint64_t vaddr_end,
 		uint64_t prot_set, uint64_t prot_clr,
 		enum _page_table_type ptt)
 {
-	uint64_t *pd_page = pde_page_vaddr(*pde);
+	uint64_t *pt_page = pde_page_vaddr(*pde);
 	uint64_t vaddr = vaddr_start;
 	uint64_t index = pte_index(vaddr);
 
 	dev_dbg(ACRN_DBG_MMU, "%s, vaddr: [0x%llx - 0x%llx]\n",
 		__func__, vaddr, vaddr_end);
 	for (; index < PTRS_PER_PTE; index++) {
-		uint64_t new_pte, *pte = pd_page + index;
-		uint64_t vaddr_next = (vaddr & PTE_MASK) + PTE_SIZE;
+		uint64_t *pte = pt_page + index;
 
 		if (pgentry_present(ptt, *pte) == 0UL) {
 			pr_err("%s, invalid op, pte not present\n", __func__);
 			return -EFAULT;
 		}
 
-		new_pte = *pte;
-		new_pte &= ~prot_clr;
-		new_pte |= prot_set;
-		set_pte(pte, new_pte);
-
-		if (vaddr_next >= vaddr_end) {
+		__modify_pte(pte, prot_set, prot_clr);
+		vaddr += PTE_SIZE;
+		if (vaddr >= vaddr_end) {
 			break;
 		}
-		vaddr = vaddr_next;
 	}
 
 	return 0;
 }
 
 /*
- * In PD level, modify [vaddr_start, vaddr_end) MR PTA.
+ * In PD level,
+ * modify [vaddr_start, vaddr_end) memory type or page access right.
  */
 static int modify_pde(uint64_t *pdpte,
 		uint64_t vaddr_start, uint64_t vaddr_end,
@@ -102,14 +108,14 @@ static int modify_pde(uint64_t *pdpte,
 		enum _page_table_type ptt)
 {
 	int ret = 0;
-	uint64_t *pdpt_page = pdpte_page_vaddr(*pdpte);
+	uint64_t *pd_page = pdpte_page_vaddr(*pdpte);
 	uint64_t vaddr = vaddr_start;
 	uint64_t index = pde_index(vaddr);
 
 	dev_dbg(ACRN_DBG_MMU, "%s, vaddr: [0x%llx - 0x%llx]\n",
 		__func__, vaddr, vaddr_end);
 	for (; index < PTRS_PER_PDE; index++) {
-		uint64_t *pde = pdpt_page + index;
+		uint64_t *pde = pd_page + index;
 		uint64_t vaddr_next = (vaddr & PDE_MASK) + PDE_SIZE;
 
 		if (pgentry_present(ptt, *pde) == 0UL) {
@@ -123,10 +129,7 @@ static int modify_pde(uint64_t *pdpte,
 				return ret;
 			}
 		} else {
-			uint64_t new_pde = *pde;
-			new_pde &= ~prot_clr;
-			new_pde |= prot_set;
-			set_pte(pde, new_pde);
+			__modify_pte(pde, prot_set, prot_clr);
 			if (vaddr_next < vaddr_end) {
 				vaddr = vaddr_next;
 				continue;
@@ -146,7 +149,8 @@ static int modify_pde(uint64_t *pdpte,
 }
 
 /*
- * In PDPT level, modify [vaddr, vaddr_end) MR PTA.
+ * In PDPT level,
+ * modify [vaddr_start, vaddr_end) memory type or page access right.
  */
 static int modify_pdpte(uint64_t *pml4e,
 		uint64_t vaddr_start, uint64_t vaddr_end,
@@ -154,14 +158,14 @@ static int modify_pdpte(uint64_t *pml4e,
 		enum _page_table_type ptt)
 {
 	int ret = 0;
-	uint64_t *pml4_page = pml4e_page_vaddr(*pml4e);
+	uint64_t *pdpt_page = pml4e_page_vaddr(*pml4e);
 	uint64_t vaddr = vaddr_start;
 	uint64_t index = pdpte_index(vaddr);
 
 	dev_dbg(ACRN_DBG_MMU, "%s, vaddr: [0x%llx - 0x%llx]\n",
 		__func__, vaddr, vaddr_end);
 	for (; index < PTRS_PER_PDPTE; index++) {
-		uint64_t *pdpte = pml4_page + index;
+		uint64_t *pdpte = pdpt_page + index;
 		uint64_t vaddr_next = (vaddr & PDPTE_MASK) + PDPTE_SIZE;
 
 		if (pgentry_present(ptt, *pdpte) == 0UL) {
@@ -175,10 +179,7 @@ static int modify_pdpte(uint64_t *pml4e,
 				return ret;
 			}
 		} else {
-			uint64_t new_pdpte = *pdpte;
-			new_pdpte &= ~prot_clr;
-			new_pdpte |= prot_set;
-			set_pte(pdpte, new_pdpte);
+			__modify_pte(pdpte, prot_set, prot_clr);
 			if (vaddr_next < vaddr_end) {
 				vaddr = vaddr_next;
 				continue;
@@ -198,9 +199,9 @@ static int modify_pdpte(uint64_t *pml4e,
 }
 
 /*
- * modify [vaddr, vaddr + size ) memory region page table attributes.
- * prot_clr - attributes want to be clear
- * prot_set - attributes want to be set
+ * modify [vaddr, vaddr + size ) memory type or page access right.
+ * prot_clr - memory type or page access right want to be clear
+ * prot_set - memory type or page access right want to be set
  * @pre: the prot_set and prot_clr should set before call this function.
  * If you just want to modify access rights, you can just set the prot_clr
  * to what you want to set, prot_clr to what you want to clear. But if you
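The comment above spells out the prot_set/prot_clr contract that mmu_modify() now follows. As a standalone illustration of the clear-then-set behaviour introduced with __modify_pte() — a minimal sketch built on the architecturally documented EPT bit layout, with the macro names from this commit redefined locally so it compiles on its own — the following small program shows a memory-type change (as used for vm0 RAM) followed by an access-right change:

	#include <stdint.h>
	#include <stdio.h>

	/* EPT leaf-entry bits (Intel SDM): bits 2:0 = read/write/execute,
	 * bits 5:3 = memory type (0 = uncached, 6 = write-back). The names
	 * mirror the macros used in this commit but are redefined here. */
	#define EPT_RD        (1UL << 0)
	#define EPT_WR        (1UL << 1)
	#define EPT_EXE       (1UL << 2)
	#define EPT_RWX       (EPT_RD | EPT_WR | EPT_EXE)
	#define EPT_UNCACHED  (0UL << 3)
	#define EPT_WB        (6UL << 3)
	#define EPT_MT_MASK   (7UL << 3)

	/* Same clear-then-set logic as the new __modify_pte() helper,
	 * minus the hypervisor-specific set_pte() store. */
	static void modify_entry(uint64_t *pte, uint64_t prot_set, uint64_t prot_clr)
	{
		uint64_t new_pte = *pte;

		new_pte &= ~prot_clr;
		new_pte |= prot_set;
		*pte = new_pte;
	}

	int main(void)
	{
		/* An entry mapped RWX and uncached. */
		uint64_t pte = 0x100000000UL | EPT_RWX | EPT_UNCACHED;

		/* Memory-type change: clear the whole MT field, then set WB.
		 * This is what ept_mr_modify(..., EPT_WB, EPT_MT_MASK) asks
		 * mmu_modify() to do for vm0's RAM ranges. */
		modify_entry(&pte, EPT_WB, EPT_MT_MASK);
		printf("after MT change:      0x%016llx\n", (unsigned long long)pte);

		/* Access-right change: clear only the write bit, set nothing. */
		modify_entry(&pte, 0UL, EPT_WR);
		printf("after dropping write: 0x%016llx\n", (unsigned long long)pte);

		return 0;
	}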


@@ -400,8 +400,9 @@ uint64_t _gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size);
 uint64_t hpa2gpa(struct vm *vm, uint64_t hpa);
 int ept_mr_add(struct vm *vm, uint64_t hpa,
 	uint64_t gpa, uint64_t size, uint32_t prot);
-int ept_mr_modify(struct vm *vm, uint64_t gpa, uint64_t size,
-	uint64_t attr_set, uint64_t attr_clr);
+int ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
+	uint64_t gpa, uint64_t size,
+	uint64_t prot_set, uint64_t prot_clr);
 int ept_mr_del(struct vm *vm, uint64_t hpa,
 	uint64_t gpa, uint64_t size);