hv: mmu: refine delete page table mapping

Merge mmu_modify and mmu_del into mmu_modify_or_del(..., type).
When type is MR_MODIFY, the function performs the old mmu_modify
action (modify memory type or page access rights); when type is
MR_DEL, it performs the old mmu_del action (delete the page table
mapping).

Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Author:    Li, Fei1
Date:      2018-07-22 16:09:21 +08:00
Committer: lijinxia
Commit:    236bb10e4d (parent 34c6862a28)

6 changed files with 77 additions and 50 deletions
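Before the diffs, a minimal usage sketch of the merged API (illustrative only, not part of the commit): the wrapper function below is hypothetical, while mmu_modify_or_del(), the MR_MODIFY/MR_DEL and PTT_HOST constants, and the PAGE_CACHE_* flags are taken from the patch itself.

/*
 * Sketch: one entry point now covers both the old modify path (MR_MODIFY)
 * and the old delete path (MR_DEL). vaddr and size must be 4K-aligned,
 * otherwise mmu_modify_or_del() returns -EINVAL.
 */
static int example_modify_then_delete(uint64_t *pml4_page,
		uint64_t vaddr, uint64_t size)
{
	int ret;

	/* MR_MODIFY: rewrite attributes of an existing mapping, e.g. set
	 * the memory type to write-back (the old mmu_modify behavior).
	 */
	ret = mmu_modify_or_del(pml4_page, vaddr, size,
			PAGE_CACHE_WB, PAGE_CACHE_MASK,
			PTT_HOST, MR_MODIFY);
	if (ret != 0) {
		return ret;
	}

	/* MR_DEL: prot_set/prot_clr are ignored and the page table entries
	 * covering [vaddr, vaddr + size) are cleared (the old delete path).
	 */
	return mmu_modify_or_del(pml4_page, vaddr, size,
			0UL, 0UL, PTT_HOST, MR_DEL);
}

Both call sites in the patch follow this shape: ept_mr_modify() passes MR_MODIFY with its prot_set/prot_clr, and ept_mr_del() passes MR_DEL with prot_set/prot_clr fixed to 0UL.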

View File

@@ -250,7 +250,9 @@ int register_mmio_emulation_handler(struct vm *vm,
 	 * need to unmap it.
 	 */
 	if (is_vm0(vm)) {
-		ept_mr_del(vm, start, start, end - start);
+		ept_mr_del(vm,
+			(uint64_t *)vm->arch_vm.nworld_eptp,
+			start, end - start);
 	}
 	/* Return success */
@@ -529,8 +531,8 @@ int ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
 	uint16_t i;
 	int ret;
-	ret = mmu_modify(pml4_page, gpa, size,
-			prot_set, prot_clr, PTT_EPT);
+	ret = mmu_modify_or_del(pml4_page, gpa, size,
+			prot_set, prot_clr, PTT_EPT, MR_MODIFY);
 	foreach_vcpu(i, vm, vcpu) {
 		vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
@@ -539,28 +541,31 @@ int ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
 	return ret;
 }
-int ept_mr_del(struct vm *vm, uint64_t hpa_arg,
-	uint64_t gpa_arg, uint64_t size)
+int ept_mr_del(struct vm *vm, uint64_t *pml4_page,
+	uint64_t gpa, uint64_t size)
 {
-	struct map_params map_params;
-	uint16_t i;
 	struct vcpu *vcpu;
-	uint64_t hpa = hpa_arg;
-	uint64_t gpa = gpa_arg;
+	uint16_t i;
+	int ret;
+	uint64_t hpa = gpa2hpa(vm, gpa);
-	/* Setup memory map parameters */
-	map_params.page_table_type = PTT_EPT;
-	map_params.pml4_base = vm->arch_vm.nworld_eptp;
-	map_params.pml4_inverted = vm->arch_vm.m2p;
+	ret = mmu_modify_or_del(pml4_page, gpa, size,
+			0UL, 0UL, PTT_EPT, MR_DEL);
+	if (ret < 0) {
+		return ret;
+	}
-	unmap_mem(&map_params, (void *)hpa, (void *)gpa, size, 0U);
+	if (hpa != 0UL) {
+		ret = mmu_modify_or_del((uint64_t *)vm->arch_vm.m2p,
+				hpa, size, 0UL, 0UL, PTT_EPT, MR_DEL);
+	}
 	foreach_vcpu(i, vm, vcpu) {
 		vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
 	}
-	dev_dbg(ACRN_DBG_EPT, "%s, hpa 0x%llx gpa 0x%llx size 0x%llx\n",
-		__func__, hpa, gpa, size);
+	dev_dbg(ACRN_DBG_EPT, "%s, gpa 0x%llx size 0x%llx\n",
+		__func__, gpa, size);
 	return 0;
 }

View File

@@ -639,7 +639,8 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
 	 * will cause EPT violation if sos accesses hv memory
 	 */
 	hv_hpa = get_hv_image_base();
-	ept_mr_del(vm, hv_hpa, hv_hpa, CONFIG_RAM_SIZE);
+	ept_mr_del(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+			hv_hpa, CONFIG_RAM_SIZE);
 	return 0;
 }

View File

@@ -593,10 +593,10 @@ void init_paging(void)
 	for (i = 0U; i < e820_entries; i++) {
 		entry = &e820[i];
 		if (entry->type == E820_TYPE_RAM) {
-			mmu_modify((uint64_t *)mmu_pml4_addr,
+			mmu_modify_or_del((uint64_t *)mmu_pml4_addr,
					entry->baseaddr, entry->length,
					PAGE_CACHE_WB, PAGE_CACHE_MASK,
-					PTT_HOST);
+					PTT_HOST, MR_MODIFY);
 		}
 	}
@@ -604,8 +604,9 @@ void init_paging(void)
 	 * to supervisor-mode for hypervisor owned memroy.
 	 */
 	hv_hpa = get_hv_image_base();
-	mmu_modify((uint64_t *)mmu_pml4_addr, hv_hpa, CONFIG_RAM_SIZE,
-			PAGE_CACHE_WB, PAGE_CACHE_MASK | PAGE_USER, PTT_HOST);
+	mmu_modify_or_del((uint64_t *)mmu_pml4_addr, hv_hpa, CONFIG_RAM_SIZE,
+			PAGE_CACHE_WB, PAGE_CACHE_MASK | PAGE_USER,
+			PTT_HOST, MR_MODIFY);
 	/* Enable paging */
 	enable_paging(HVA2HPA(mmu_pml4_addr));

View File

@@ -56,23 +56,30 @@ static int split_large_page(uint64_t *pte,
 	return 0;
 }
-static inline void __modify_pte(uint64_t *pte,
-		uint64_t prot_set, uint64_t prot_clr)
+static inline void __modify_or_del_pte(uint64_t *pte,
+		uint64_t prot_set, uint64_t prot_clr, uint32_t type)
 {
-	uint64_t new_pte = *pte;
-	new_pte &= ~prot_clr;
-	new_pte |= prot_set;
-	set_pte(pte, new_pte);
+	if (type == MR_MODIFY) {
+		uint64_t new_pte = *pte;
+		new_pte &= ~prot_clr;
+		new_pte |= prot_set;
+		set_pte(pte, new_pte);
+	} else {
+		set_pte(pte, 0);
+	}
 }
 /*
  * In PT level,
+ * type: MR_MODIFY
  * modify [vaddr_start, vaddr_end) memory type or page access right.
+ * type: MR_DEL
+ * delete [vaddr_start, vaddr_end) MT PT mapping
  */
-static int modify_pte(uint64_t *pde,
+static int modify_or_del_pte(uint64_t *pde,
		uint64_t vaddr_start, uint64_t vaddr_end,
		uint64_t prot_set, uint64_t prot_clr,
-		enum _page_table_type ptt)
+		enum _page_table_type ptt, uint32_t type)
 {
 	uint64_t *pt_page = pde_page_vaddr(*pde);
 	uint64_t vaddr = vaddr_start;
@@ -88,7 +95,7 @@ static int modify_pte(uint64_t *pde,
 			return -EFAULT;
 		}
-		__modify_pte(pte, prot_set, prot_clr);
+		__modify_or_del_pte(pte, prot_set, prot_clr, type);
 		vaddr += PTE_SIZE;
 		if (vaddr >= vaddr_end) {
 			break;
@@ -100,12 +107,15 @@ static int modify_pte(uint64_t *pde,
 /*
  * In PD level,
+ * type: MR_MODIFY
  * modify [vaddr_start, vaddr_end) memory type or page access right.
+ * type: MR_DEL
+ * delete [vaddr_start, vaddr_end) MT PT mapping
  */
-static int modify_pde(uint64_t *pdpte,
+static int modify_or_del_pde(uint64_t *pdpte,
		uint64_t vaddr_start, uint64_t vaddr_end,
		uint64_t prot_set, uint64_t prot_clr,
-		enum _page_table_type ptt)
+		enum _page_table_type ptt, uint32_t type)
 {
 	int ret = 0;
 	uint64_t *pd_page = pdpte_page_vaddr(*pdpte);
@@ -130,7 +140,8 @@ static int modify_pde(uint64_t *pdpte,
 				return ret;
 			}
 		} else {
-			__modify_pte(pde, prot_set, prot_clr);
+			__modify_or_del_pte(pde,
+				prot_set, prot_clr, type);
 			if (vaddr_next < vaddr_end) {
 				vaddr = vaddr_next;
 				continue;
@@ -138,8 +149,8 @@ static int modify_pde(uint64_t *pdpte,
 				return 0;
 			}
 		}
-		ret = modify_pte(pde, vaddr, vaddr_end,
-				prot_set, prot_clr, ptt);
+		ret = modify_or_del_pte(pde, vaddr, vaddr_end,
+				prot_set, prot_clr, ptt, type);
 		if (ret != 0 || (vaddr_next >= vaddr_end)) {
 			return ret;
 		}
@@ -151,12 +162,15 @@ static int modify_pde(uint64_t *pdpte,
 /*
  * In PDPT level,
+ * type: MR_MODIFY
  * modify [vaddr_start, vaddr_end) memory type or page access right.
+ * type: MR_DEL
+ * delete [vaddr_start, vaddr_end) MT PT mapping
  */
-static int modify_pdpte(uint64_t *pml4e,
+static int modify_or_del_pdpte(uint64_t *pml4e,
		uint64_t vaddr_start, uint64_t vaddr_end,
		uint64_t prot_set, uint64_t prot_clr,
-		enum _page_table_type ptt)
+		enum _page_table_type ptt, uint32_t type)
 {
 	int ret = 0;
 	uint64_t *pdpt_page = pml4e_page_vaddr(*pml4e);
@@ -181,7 +195,8 @@ static int modify_pdpte(uint64_t *pml4e,
 				return ret;
 			}
 		} else {
-			__modify_pte(pdpte, prot_set, prot_clr);
+			__modify_or_del_pte(pdpte,
+				prot_set, prot_clr, type);
 			if (vaddr_next < vaddr_end) {
 				vaddr = vaddr_next;
 				continue;
@@ -189,8 +204,8 @@ static int modify_pdpte(uint64_t *pml4e,
 				return 0;
 			}
 		}
-		ret = modify_pde(pdpte, vaddr, vaddr_end,
-				prot_set, prot_clr, ptt);
+		ret = modify_or_del_pde(pdpte, vaddr, vaddr_end,
+				prot_set, prot_clr, ptt, type);
 		if (ret != 0 || (vaddr_next >= vaddr_end)) {
 			return ret;
 		}
@@ -201,6 +216,7 @@ static int modify_pdpte(uint64_t *pml4e,
 }
 /*
+ * type: MR_MODIFY
  * modify [vaddr, vaddr + size ) memory type or page access right.
  * prot_clr - memory type or page access right want to be clear
  * prot_set - memory type or page access right want to be set
@@ -209,11 +225,13 @@ static int modify_pdpte(uint64_t *pml4e,
  * to what you want to set, prot_clr to what you want to clear. But if you
  * want to modify the MT, you should set the prot_set to what MT you want
  * to set, prot_clr to the MT mask.
+ * type: MR_DEL
+ * delete [vaddr_base, vaddr_base + size ) memory region page table mapping.
  */
-int mmu_modify(uint64_t *pml4_page,
+int mmu_modify_or_del(uint64_t *pml4_page,
		uint64_t vaddr_base, uint64_t size,
		uint64_t prot_set, uint64_t prot_clr,
-		enum _page_table_type ptt)
+		enum _page_table_type ptt, uint32_t type)
 {
 	uint64_t vaddr = vaddr_base;
 	uint64_t vaddr_next, vaddr_end;
@@ -221,7 +239,8 @@ int mmu_modify(uint64_t *pml4_page,
 	int ret;
 	if (!MEM_ALIGNED_CHECK(vaddr, PAGE_SIZE_4K) ||
-			!MEM_ALIGNED_CHECK(size, PAGE_SIZE_4K)) {
+			!MEM_ALIGNED_CHECK(size, PAGE_SIZE_4K) ||
+			(type != MR_MODIFY && type != MR_DEL)) {
 		pr_err("%s, invalid parameters!\n", __func__);
 		return -EINVAL;
 	}
@@ -236,8 +255,8 @@ int mmu_modify(uint64_t *pml4_page,
 			pr_err("%s, invalid op, pml4e not present\n", __func__);
 			return -EFAULT;
 		}
 		ret = modify_pdpte(pml4e, vaddr, vaddr_end,
-				prot_set, prot_clr, ptt);
+		ret = modify_or_del_pdpte(pml4e, vaddr, vaddr_end,
+				prot_set, prot_clr, ptt, type);
 		if (ret != 0) {
 			return ret;
 		}

View File

@@ -482,7 +482,8 @@ static int32_t _set_vm_memory_region(struct vm *vm,
 		return ept_mr_add(target_vm, hpa,
 			region->gpa, region->size, prot);
 	} else {
-		return ept_mr_del(target_vm, hpa,
+		return ept_mr_del(target_vm,
+			(uint64_t *)target_vm->arch_vm.nworld_eptp,
 			region->gpa, region->size);
 	}

View File

@@ -326,10 +326,10 @@ int map_mem(struct map_params *map_params, void *paddr, void *vaddr,
		uint64_t size, uint32_t flags);
 int unmap_mem(struct map_params *map_params, void *paddr, void *vaddr,
		uint64_t size, uint32_t flags);
-int mmu_modify(uint64_t *pml4_page,
+int mmu_modify_or_del(uint64_t *pml4_page,
		uint64_t vaddr_base, uint64_t size,
		uint64_t prot_set, uint64_t prot_clr,
-		enum _page_table_type ptt);
+		enum _page_table_type ptt, uint32_t type);
 int check_vmx_mmu_cap(void);
 uint16_t allocate_vpid(void);
 void flush_vpid_single(uint16_t vpid);
@@ -403,8 +403,8 @@ int ept_mr_add(struct vm *vm, uint64_t hpa_arg,
 int ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
		uint64_t gpa, uint64_t size,
		uint64_t prot_set, uint64_t prot_clr);
-int ept_mr_del(struct vm *vm, uint64_t hpa_arg,
-	uint64_t gpa_arg, uint64_t size);
+int ept_mr_del(struct vm *vm, uint64_t *pml4_page,
+	uint64_t gpa, uint64_t size);
 int ept_violation_vmexit_handler(struct vcpu *vcpu);
 int ept_misconfig_vmexit_handler(struct vcpu *vcpu);