Mirror of https://github.com/projectacrn/acrn-hypervisor.git, synced 2025-06-24 14:33:38 +00:00
hv: mmu: make page table operation no fault
Page table operations do not fault except when: 1. the hypervisor is out of memory to allocate a page for a page table operation; or 2. there is a bug in a page table operation in the hypervisor or device model. Since we assume neither happens on our platform in a release build, there is no need to check whether a page table operation faulted. In the debug version, however, we panic the hypervisor if these conditions do not actually hold.

Tracked-On: #1124
Signed-off-by: Li, Fei1 <fei1.li@intel.com>
commit cc89e52d5b
parent 1e084b08f2
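What the diff below amounts to: the page-table walkers used to propagate error codes that every caller had to check; after this change the "impossible" cases panic instead, so a debug build stops on the spot and the whole call chain becomes void. A minimal sketch of the pattern, using toy helpers (entry_present, update_entry, toy_panic are hypothetical stand-ins, not the ACRN API):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins; ACRN's real panic() halts the hypervisor. */
#define toy_panic(msg)	do { fprintf(stderr, "%s\n", msg); abort(); } while (0)
#define EFAULT 14

static int entry_present(uint64_t e) { return (int)(e & 1UL); }
static void update_entry(uint64_t *e) { *e |= 2UL; }

/* Before: every level returned an error code the caller had to check. */
static int modify_pte_checked(uint64_t *pte)
{
	if (entry_present(*pte) == 0) {
		return -EFAULT;		/* propagated up through the walk */
	}
	update_entry(pte);
	return 0;
}

/* After: the "impossible" case stops a debug build on the spot,
 * and the function (and everything above it) can return void. */
static void modify_pte(uint64_t *pte)
{
	if (entry_present(*pte) == 0) {
		toy_panic("invalid op, pte not present");
	}
	update_entry(pte);
}

int main(void)
{
	uint64_t pte = 1UL;	/* present */
	(void)modify_pte_checked(&pte);
	modify_pte(&pte);
	return 0;
}

The only failures the real code still checks are page allocations inside construct_pgentry() and split_large_page(), and those are now absorbed at the call site with a panic rather than returned.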
@@ -219,12 +219,11 @@ int ept_misconfig_vmexit_handler(__unused struct vcpu *vcpu)
 	return status;
 }
 
-int ept_mr_add(struct vm *vm, uint64_t *pml4_page,
+void ept_mr_add(struct vm *vm, uint64_t *pml4_page,
 		uint64_t hpa, uint64_t gpa, uint64_t size, uint64_t prot_orig)
 {
 	uint16_t i;
 	struct vcpu *vcpu;
-	int ret;
 	uint64_t prot = prot_orig;
 
 	dev_dbg(ACRN_DBG_EPT, "%s, vm[%d] hpa: 0x%016llx gpa: 0x%016llx ",
@@ -239,59 +238,51 @@ int ept_mr_add(struct vm *vm, uint64_t *pml4_page,
 		prot |= EPT_SNOOP_CTRL;
 	}
 
-	ret = mmu_add(pml4_page, hpa, gpa, size, prot, PTT_EPT);
+	mmu_add(pml4_page, hpa, gpa, size, prot, PTT_EPT);
 	/* No need to create inverted page tables for trusty memory */
-	if (ret == 0 && ((void *)pml4_page == vm->arch_vm.nworld_eptp)) {
-		ret = mmu_add((uint64_t *)vm->arch_vm.m2p,
+	if ((void *)pml4_page == vm->arch_vm.nworld_eptp) {
+		mmu_add((uint64_t *)vm->arch_vm.m2p,
 			gpa, hpa, size, prot, PTT_EPT);
 	}
 
 	foreach_vcpu(i, vm, vcpu) {
 		vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
 	}
-
-	return ret;
 }
 
-int ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
+void ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
 		uint64_t gpa, uint64_t size,
 		uint64_t prot_set, uint64_t prot_clr)
 {
 	struct vcpu *vcpu;
 	uint16_t i;
-	int ret;
 
-	ret = mmu_modify_or_del(pml4_page, gpa, size,
+	mmu_modify_or_del(pml4_page, gpa, size,
 		prot_set, prot_clr, PTT_EPT, MR_MODIFY);
 
 	foreach_vcpu(i, vm, vcpu) {
 		vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
 	}
-
-	return ret;
 }
 
-int ept_mr_del(struct vm *vm, uint64_t *pml4_page,
+void ept_mr_del(struct vm *vm, uint64_t *pml4_page,
 		uint64_t gpa, uint64_t size)
 {
 	struct vcpu *vcpu;
 	uint16_t i;
-	int ret;
 	uint64_t hpa = gpa2hpa(vm, gpa);
 
 	dev_dbg(ACRN_DBG_EPT, "%s,vm[%d] gpa 0x%llx size 0x%llx\n",
 		__func__, vm->vm_id, gpa, size);
 
-	ret = mmu_modify_or_del(pml4_page, gpa, size,
+	mmu_modify_or_del(pml4_page, gpa, size,
 		0UL, 0UL, PTT_EPT, MR_DEL);
-	if ((ret == 0) && (hpa != 0UL)) {
-		ret = mmu_modify_or_del((uint64_t *)vm->arch_vm.m2p,
+	if ((void *)pml4_page == vm->arch_vm.nworld_eptp) {
+		mmu_modify_or_del((uint64_t *)vm->arch_vm.m2p,
 			hpa, size, 0UL, 0UL, PTT_EPT, MR_DEL);
 	}
 
 	foreach_vcpu(i, vm, vcpu) {
 		vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
 	}
-
-	return ret;
 }
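A note on the ept_mr_add()/ept_mr_del() bodies above: vm->arch_vm.m2p holds the inverted (HPA-to-GPA) tables, so a normal-world region is recorded in both directions, and the new code keys both calls off the same nworld_eptp test. A toy sketch of that pairing (flat arrays standing in for the real 4-level tables):

#include <stdint.h>

#define TOY_PAGES 16U

/* Toy stand-in for the paired tables; ACRN's real EPT and m2p are
 * 4-level page-table trees, not flat arrays. */
struct toy_vm_tables {
	uint64_t gpa_to_hpa[TOY_PAGES];	/* the EPT proper */
	uint64_t hpa_to_gpa[TOY_PAGES];	/* the inverted "m2p" tables */
};

/* Shape of the double insert in ept_mr_add(): map forward, then mirror. */
static void toy_map_page(struct toy_vm_tables *t, uint64_t hpa_pfn,
		uint64_t gpa_pfn)
{
	t->gpa_to_hpa[gpa_pfn] = hpa_pfn;	/* guest walk: GPA -> HPA */
	t->hpa_to_gpa[hpa_pfn] = gpa_pfn;	/* reverse lookup: HPA -> GPA */
}

int main(void)
{
	struct toy_vm_tables t = { {0}, {0} };
	toy_map_page(&t, 3UL, 7UL);	/* HPA page 3 <-> GPA page 7 */
	return 0;
}

Both real functions then kick every vcpu with ACRN_REQUEST_EPT_FLUSH so stale translations are flushed.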
@@ -92,7 +92,7 @@ static inline int construct_pgentry(enum _page_table_type ptt, uint64_t *pde)
  * type: MR_DEL
  * delete [vaddr_start, vaddr_end) MT PT mapping
  */
-static int modify_or_del_pte(uint64_t *pde,
+static void modify_or_del_pte(uint64_t *pde,
 		uint64_t vaddr_start, uint64_t vaddr_end,
 		uint64_t prot_set, uint64_t prot_clr,
 		enum _page_table_type ptt, uint32_t type)
@@ -107,8 +107,7 @@ static int modify_or_del_pte(uint64_t *pde,
 		uint64_t *pte = pt_page + index;
 
 		if (pgentry_present(ptt, *pte) == 0UL) {
-			pr_err("%s, invalid op, pte not present\n", __func__);
-			return -EFAULT;
+			panic("invalid op, pte not present");
 		}
 
 		local_modify_or_del_pte(pte, prot_set, prot_clr, type);
@@ -117,8 +116,6 @@ static int modify_or_del_pte(uint64_t *pde,
 			break;
 		}
 	}
-
-	return 0;
 }
 
 /*
@@ -128,12 +125,11 @@ static int modify_or_del_pte(uint64_t *pde,
  * type: MR_DEL
  * delete [vaddr_start, vaddr_end) MT PT mapping
  */
-static int modify_or_del_pde(uint64_t *pdpte,
+static void modify_or_del_pde(uint64_t *pdpte,
 		uint64_t vaddr_start, uint64_t vaddr_end,
 		uint64_t prot_set, uint64_t prot_clr,
 		enum _page_table_type ptt, uint32_t type)
 {
-	int ret = 0;
 	uint64_t *pd_page = pdpte_page_vaddr(*pdpte);
 	uint64_t vaddr = vaddr_start;
 	uint64_t index = pde_index(vaddr);
@@ -145,15 +141,14 @@ static int modify_or_del_pde(uint64_t *pdpte,
 		uint64_t vaddr_next = (vaddr & PDE_MASK) + PDE_SIZE;
 
 		if (pgentry_present(ptt, *pde) == 0UL) {
-			pr_err("%s, invalid op, pde not present\n", __func__);
-			return -EFAULT;
+			panic("invalid op, pde not present");
 		}
 		if (pde_large(*pde) != 0UL) {
 			if (vaddr_next > vaddr_end ||
 				!mem_aligned_check(vaddr, PDE_SIZE)) {
-				ret = split_large_page(pde, IA32E_PD, ptt);
+				int ret = split_large_page(pde, IA32E_PD, ptt);
 				if (ret != 0) {
-					return ret;
+					panic("split large PDE failed");
 				}
 			} else {
 				local_modify_or_del_pte(pde,
@@ -162,18 +157,16 @@ static int modify_or_del_pde(uint64_t *pdpte,
 					vaddr = vaddr_next;
 					continue;
 				}
-				return 0;
+				break;	/* done */
 			}
 		}
-		ret = modify_or_del_pte(pde, vaddr, vaddr_end,
+		modify_or_del_pte(pde, vaddr, vaddr_end,
 			prot_set, prot_clr, ptt, type);
-		if (ret != 0 || (vaddr_next >= vaddr_end)) {
-			return ret;
+		if (vaddr_next >= vaddr_end) {
+			break;	/* done */
 		}
 		vaddr = vaddr_next;
 	}
-
-	return ret;
 }
 
 /*
@@ -183,12 +176,11 @@ static int modify_or_del_pde(uint64_t *pdpte,
  * type: MR_DEL
  * delete [vaddr_start, vaddr_end) MT PT mapping
  */
-static int modify_or_del_pdpte(uint64_t *pml4e,
+static void modify_or_del_pdpte(uint64_t *pml4e,
 		uint64_t vaddr_start, uint64_t vaddr_end,
 		uint64_t prot_set, uint64_t prot_clr,
 		enum _page_table_type ptt, uint32_t type)
 {
-	int ret = 0;
 	uint64_t *pdpt_page = pml4e_page_vaddr(*pml4e);
 	uint64_t vaddr = vaddr_start;
 	uint64_t index = pdpte_index(vaddr);
@@ -200,15 +192,14 @@ static int modify_or_del_pdpte(uint64_t *pml4e,
 		uint64_t vaddr_next = (vaddr & PDPTE_MASK) + PDPTE_SIZE;
 
 		if (pgentry_present(ptt, *pdpte) == 0UL) {
-			pr_err("%s, invalid op, pdpte not present\n", __func__);
-			return -EFAULT;
+			panic("invalid op, pdpte not present");
 		}
 		if (pdpte_large(*pdpte) != 0UL) {
 			if (vaddr_next > vaddr_end ||
 				!mem_aligned_check(vaddr, PDPTE_SIZE)) {
-				ret = split_large_page(pdpte, IA32E_PDPT, ptt);
+				int ret = split_large_page(pdpte, IA32E_PDPT, ptt);
 				if (ret != 0) {
-					return ret;
+					panic("split large PDPTE failed");
 				}
 			} else {
 				local_modify_or_del_pte(pdpte,
@@ -217,18 +208,16 @@ static int modify_or_del_pdpte(uint64_t *pml4e,
 					vaddr = vaddr_next;
 					continue;
 				}
-				return 0;
+				break;	/* done */
 			}
 		}
-		ret = modify_or_del_pde(pdpte, vaddr, vaddr_end,
+		modify_or_del_pde(pdpte, vaddr, vaddr_end,
 			prot_set, prot_clr, ptt, type);
-		if (ret != 0 || (vaddr_next >= vaddr_end)) {
-			return ret;
+		if (vaddr_next >= vaddr_end) {
+			break;	/* done */
 		}
 		vaddr = vaddr_next;
 	}
-
-	return ret;
 }
 
 /*
@@ -244,50 +233,36 @@ static int modify_or_del_pdpte(uint64_t *pml4e,
  * type: MR_DEL
  * delete [vaddr_base, vaddr_base + size ) memory region page table mapping.
  */
-int mmu_modify_or_del(uint64_t *pml4_page,
+void mmu_modify_or_del(uint64_t *pml4_page,
 		uint64_t vaddr_base, uint64_t size,
 		uint64_t prot_set, uint64_t prot_clr,
 		enum _page_table_type ptt, uint32_t type)
 {
-	uint64_t vaddr = vaddr_base;
+	uint64_t vaddr = round_page_up(vaddr_base);
 	uint64_t vaddr_next, vaddr_end;
 	uint64_t *pml4e;
-	int ret;
-
-	if (!mem_aligned_check(vaddr, (uint64_t)PAGE_SIZE_4K) ||
-		!mem_aligned_check(size, (uint64_t)PAGE_SIZE_4K) ||
-		(type != MR_MODIFY && type != MR_DEL)) {
-		pr_err("%s, invalid parameters!\n", __func__);
-		return -EINVAL;
-	}
 
-	vaddr_end = vaddr + size;
+	vaddr_end = vaddr + round_page_down(size);
+	dev_dbg(ACRN_DBG_MMU, "%s, vaddr: 0x%llx, size: 0x%llx\n",
+		__func__, vaddr, size);
 
 	while (vaddr < vaddr_end) {
 		vaddr_next = (vaddr & PML4E_MASK) + PML4E_SIZE;
 		pml4e = pml4e_offset(pml4_page, vaddr);
 		if (pgentry_present(ptt, *pml4e) == 0UL) {
-			pr_err("%s, invalid op, pml4e not present\n", __func__);
-			return -EFAULT;
+			panic("invalid op, pml4e not present");
 		}
-		ret = modify_or_del_pdpte(pml4e, vaddr, vaddr_end,
+		modify_or_del_pdpte(pml4e, vaddr, vaddr_end,
 			prot_set, prot_clr, ptt, type);
-		if (ret != 0) {
-			return ret;
-		}
-
 		vaddr = vaddr_next;
 	}
-
-	return 0;
 }
 
 /*
  * In PT level,
  * add [vaddr_start, vaddr_end) to [paddr_base, ...) MT PT mapping
  */
-static int add_pte(uint64_t *pde, uint64_t paddr_start,
+static void add_pte(uint64_t *pde, uint64_t paddr_start,
 		uint64_t vaddr_start, uint64_t vaddr_end,
 		uint64_t prot, enum _page_table_type ptt)
 {
@@ -302,29 +277,25 @@ static int add_pte(uint64_t *pde, uint64_t paddr_start,
 		uint64_t *pte = pt_page + index;
 
 		if (pgentry_present(ptt, *pte) != 0UL) {
-			pr_err("%s, invalid op, pte present\n", __func__);
-			return -EFAULT;
+			panic("invalid op, pte present");
 		}
 
 		set_pgentry(pte, paddr | prot);
 		paddr += PTE_SIZE;
 		vaddr += PTE_SIZE;
 		if (vaddr >= vaddr_end)
-			return 0;
+			break;	/* done */
 	}
-
-	return 0;
 }
 
 /*
  * In PD level,
  * add [vaddr_start, vaddr_end) to [paddr_base, ...) MT PT mapping
  */
-static int add_pde(uint64_t *pdpte, uint64_t paddr_start,
+static void add_pde(uint64_t *pdpte, uint64_t paddr_start,
 		uint64_t vaddr_start, uint64_t vaddr_end,
 		uint64_t prot, enum _page_table_type ptt)
 {
-	int ret = 0;
 	uint64_t *pd_page = pdpte_page_vaddr(*pdpte);
 	uint64_t vaddr = vaddr_start;
 	uint64_t paddr = paddr_start;
@@ -346,34 +317,31 @@ static int add_pde(uint64_t *pdpte, uint64_t paddr_start,
 					vaddr = vaddr_next;
 					continue;
 				}
-				return 0;
+				break;	/* done */
 			} else {
-				ret = construct_pgentry(ptt, pde);
+				int ret = construct_pgentry(ptt, pde);
 				if (ret != 0) {
-					return ret;
+					panic("construct pde page table fail");
 				}
 			}
 		}
-		ret = add_pte(pde, paddr, vaddr, vaddr_end, prot, ptt);
-		if (ret != 0 || (vaddr_next >= vaddr_end)) {
-			return ret;
+		add_pte(pde, paddr, vaddr, vaddr_end, prot, ptt);
+		if (vaddr_next >= vaddr_end) {
+			break;	/* done */
 		}
 		paddr += (vaddr_next - vaddr);
 		vaddr = vaddr_next;
 	}
-
-	return ret;
 }
 
 /*
  * In PDPT level,
  * add [vaddr_start, vaddr_end) to [paddr_base, ...) MT PT mapping
  */
-static int add_pdpte(uint64_t *pml4e, uint64_t paddr_start,
+static void add_pdpte(uint64_t *pml4e, uint64_t paddr_start,
 		uint64_t vaddr_start, uint64_t vaddr_end,
 		uint64_t prot, enum _page_table_type ptt)
 {
-	int ret = 0;
 	uint64_t *pdpt_page = pml4e_page_vaddr(*pml4e);
 	uint64_t vaddr = vaddr_start;
 	uint64_t paddr = paddr_start;
@@ -395,23 +363,21 @@ static int add_pdpte(uint64_t *pml4e, uint64_t paddr_start,
 					vaddr = vaddr_next;
 					continue;
 				}
-				return 0;
+				break;	/* done */
 			} else {
-				ret = construct_pgentry(ptt, pdpte);
+				int ret = construct_pgentry(ptt, pdpte);
 				if (ret != 0) {
-					return ret;
+					panic("construct pdpte page table fail");
 				}
 			}
 		}
-		ret = add_pde(pdpte, paddr, vaddr, vaddr_end, prot, ptt);
-		if (ret != 0 || (vaddr_next >= vaddr_end)) {
-			return ret;
+		add_pde(pdpte, paddr, vaddr, vaddr_end, prot, ptt);
+		if (vaddr_next >= vaddr_end) {
+			break;	/* done */
 		}
 		paddr += (vaddr_next - vaddr);
 		vaddr = vaddr_next;
 	}
-
-	return ret;
 }
 
 /*
@@ -419,14 +385,13 @@ static int add_pdpte(uint64_t *pml4e, uint64_t paddr_start,
  * add [vaddr_base, vaddr_base + size ) memory region page table mapping.
  * @pre: the prot should set before call this function.
  */
-int mmu_add(uint64_t *pml4_page, uint64_t paddr_base,
+void mmu_add(uint64_t *pml4_page, uint64_t paddr_base,
 		uint64_t vaddr_base, uint64_t size,
 		uint64_t prot, enum _page_table_type ptt)
 {
 	uint64_t vaddr, vaddr_next, vaddr_end;
 	uint64_t paddr;
 	uint64_t *pml4e;
-	int ret;
 
 	dev_dbg(ACRN_DBG_MMU, "%s, paddr 0x%llx, vaddr 0x%llx, size 0x%llx\n",
 		__func__, paddr_base, vaddr_base, size);
@@ -440,21 +405,16 @@ int mmu_add(uint64_t *pml4_page, uint64_t paddr_base,
 		vaddr_next = (vaddr & PML4E_MASK) + PML4E_SIZE;
 		pml4e = pml4e_offset(pml4_page, vaddr);
 		if (pgentry_present(ptt, *pml4e) == 0UL) {
-			ret = construct_pgentry(ptt, pml4e);
+			int ret = construct_pgentry(ptt, pml4e);
 			if (ret != 0) {
-				return ret;
+				panic("construct pml4e page table fail");
 			}
 		}
-		ret = add_pdpte(pml4e, paddr, vaddr, vaddr_end, prot, ptt);
-		if (ret != 0) {
-			return ret;
-		}
+		add_pdpte(pml4e, paddr, vaddr, vaddr_end, prot, ptt);
 
 		paddr += (vaddr_next - vaddr);
 		vaddr = vaddr_next;
 	}
-
-	return 0;
 }
 
 uint64_t *lookup_address(uint64_t *pml4_page,
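All the walkers above share one loop idiom: compute vaddr_next as the end of the current entry's span, handle [vaddr, min(vaddr_next, vaddr_end)), then break once the request is covered. A self-contained sketch of the idiom (generic C; LEVEL_SIZE/LEVEL_MASK stand in for PDE_SIZE/PDE_MASK and friends):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define LEVEL_SIZE 0x200000UL			/* stand-in for PDE_SIZE (2M) */
#define LEVEL_MASK (~(LEVEL_SIZE - 1UL))	/* stand-in for PDE_MASK */

/* Chop [vaddr, vaddr_end) into pieces that never cross a LEVEL_SIZE
 * boundary, breaking out once the last piece is handled. */
static void walk(uint64_t vaddr, uint64_t vaddr_end)
{
	while (vaddr < vaddr_end) {
		uint64_t vaddr_next = (vaddr & LEVEL_MASK) + LEVEL_SIZE;
		uint64_t piece_end = (vaddr_next < vaddr_end) ? vaddr_next : vaddr_end;

		printf("handle [0x%" PRIx64 ", 0x%" PRIx64 ")\n", vaddr, piece_end);
		if (vaddr_next >= vaddr_end) {
			break;	/* done */
		}
		vaddr = vaddr_next;
	}
}

int main(void)
{
	walk(0x1ff000UL, 0x601000UL);	/* touches four 2M entries */
	return 0;
}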
@@ -160,16 +160,12 @@ void destroy_secure_world(struct vm *vm, bool need_clr_mem)
 	}
 
 	/* restore memory to SOS ept mapping */
-	if (ept_mr_add(vm0, vm0->arch_vm.nworld_eptp,
-		hpa, gpa_sos, size, EPT_RWX | EPT_WB) != 0) {
-		pr_warn("Restore trusty mem to SOS failed");
-	}
+	ept_mr_add(vm0, vm0->arch_vm.nworld_eptp,
+		hpa, gpa_sos, size, EPT_RWX | EPT_WB);
 
 	/* Restore memory to guest normal world */
-	if (ept_mr_add(vm, vm->arch_vm.nworld_eptp,
-		hpa, gpa_uos, size, EPT_RWX | EPT_WB) != 0) {
-		pr_warn("Restore trusty mem to nworld failed");
-	}
+	ept_mr_add(vm, vm->arch_vm.nworld_eptp,
+		hpa, gpa_uos, size, EPT_RWX | EPT_WB);
 
 	/* Free trusty ept page-structures */
 	pdpt_addr =
@@ -470,13 +470,14 @@ static int32_t local_set_vm_memory_region(struct vm *vm,
 			prot |= EPT_UNCACHED;
 		}
 		/* create gpa to hpa EPT mapping */
-		return ept_mr_add(target_vm, pml4_page, hpa,
+		ept_mr_add(target_vm, pml4_page, hpa,
 			region->gpa, region->size, prot);
 	} else {
-		return ept_mr_del(target_vm, pml4_page,
+		ept_mr_del(target_vm, pml4_page,
 			region->gpa, region->size);
 	}
+	return 0;
 }
 
 /**
@@ -571,8 +572,10 @@ static int32_t write_protect_page(struct vm *vm, struct wp_data *wp)
 	prot_set = (wp->set != 0U) ? 0UL : EPT_WR;
 	prot_clr = (wp->set != 0U) ? EPT_WR : 0UL;
 
-	return ept_mr_modify(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+	ept_mr_modify(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
 		wp->gpa, CPU_PAGE_SIZE, prot_set, prot_clr);
+
+	return 0;
 }
 
 /**
@@ -185,33 +185,25 @@ static int vdev_pt_cfgread(struct pci_vdev *vdev, uint32_t offset,
 	return 0;
 }
 
-static int vdev_pt_remap_bar(struct pci_vdev *vdev, uint32_t idx,
+static void vdev_pt_remap_bar(struct pci_vdev *vdev, uint32_t idx,
 	uint32_t new_base)
 {
-	int error = 0;
 	struct vm *vm = vdev->vpci->vm;
 
 	if (vdev->bar[idx].base != 0UL) {
-		error = ept_mr_del(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+		ept_mr_del(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
 			vdev->bar[idx].base,
 			vdev->bar[idx].size);
-		if (error != 0) {
-			return error;
-		}
 	}
 
 	if (new_base != 0U) {
 		/* Map the physical BAR in the guest MMIO space */
-		error = ept_mr_add(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+		ept_mr_add(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
 			vdev->pdev.bar[idx].base, /* HPA */
 			new_base, /*GPA*/
 			vdev->bar[idx].size,
 			EPT_WR | EPT_RD | EPT_UNCACHED);
-		if (error != 0) {
-			return error;
-		}
 	}
-	return error;
 }
 
 static void vdev_pt_cfgwrite_bar(struct pci_vdev *vdev, uint32_t offset,
@@ -220,7 +212,6 @@ static void vdev_pt_cfgwrite_bar(struct pci_vdev *vdev, uint32_t offset,
 	uint32_t idx;
 	uint32_t new_bar, mask;
 	bool bar_update_normal;
-	int error;
 
 	if ((bytes != 4U) || ((offset & 0x3U) != 0U)) {
 		return;
@@ -239,11 +230,8 @@ static void vdev_pt_cfgwrite_bar(struct pci_vdev *vdev, uint32_t offset,
 	bar_update_normal = (new_bar_uos != (uint32_t)~0U);
 	new_bar = new_bar_uos & mask;
 	if (bar_update_normal) {
-		error = vdev_pt_remap_bar(vdev, idx,
+		vdev_pt_remap_bar(vdev, idx,
 			pci_bar_base(new_bar));
-		if (error != 0) {
-			pr_err("vdev_pt_remap_bar failed: %d", idx);
-		}
 
 		vdev->bar[idx].base = pci_bar_base(new_bar);
 	}
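Caller-side effect in the passthrough-BAR path above: vdev_pt_remap_bar() used to bubble EPT errors up to vdev_pt_cfgwrite_bar(), which logged them; now the remap is fire-and-forget. A toy sketch of the unmap-then-map sequence it performs (toy_unmap/toy_map are hypothetical stand-ins for ept_mr_del()/ept_mr_add(); the base update is folded in here, while the real caller updates vdev->bar[idx].base itself):

#include <stdint.h>
#include <stdio.h>

struct toy_bar {
	uint64_t guest_base;	/* current GPA of the BAR window, 0 = unmapped */
	uint64_t host_base;	/* fixed HPA of the physical BAR */
	uint64_t size;
};

/* Hypothetical stand-in for ept_mr_del(). */
static void toy_unmap(uint64_t gpa, uint64_t size)
{
	printf("unmap gpa=0x%llx size=0x%llx\n",
		(unsigned long long)gpa, (unsigned long long)size);
}

/* Hypothetical stand-in for ept_mr_add() with uncached protections. */
static void toy_map(uint64_t hpa, uint64_t gpa, uint64_t size)
{
	printf("map hpa=0x%llx -> gpa=0x%llx size=0x%llx (uncached)\n",
		(unsigned long long)hpa, (unsigned long long)gpa,
		(unsigned long long)size);
}

/* Shape of vdev_pt_remap_bar() after the change: tear down the old
 * guest window (if any), then map the new one; nothing is reported. */
static void toy_remap_bar(struct toy_bar *bar, uint64_t new_guest_base)
{
	if (bar->guest_base != 0UL) {
		toy_unmap(bar->guest_base, bar->size);
	}
	if (new_guest_base != 0UL) {
		toy_map(bar->host_base, new_guest_base, bar->size);
	}
	bar->guest_base = new_guest_base;
}

int main(void)
{
	struct toy_bar bar = { 0x0UL, 0xfebd0000UL, 0x1000UL };
	toy_remap_bar(&bar, 0xc0000000UL);	/* guest programs a BAR */
	toy_remap_bar(&bar, 0xd0000000UL);	/* reprogram: unmap old, map new */
	return 0;
}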
@@ -77,10 +77,10 @@ void free_paging_struct(void *ptr);
 void enable_paging(uint64_t pml4_base_addr);
 void enable_smep(void);
 void init_paging(void);
-int mmu_add(uint64_t *pml4_page, uint64_t paddr_base,
+void mmu_add(uint64_t *pml4_page, uint64_t paddr_base,
 		uint64_t vaddr_base, uint64_t size,
 		uint64_t prot, enum _page_table_type ptt);
-int mmu_modify_or_del(uint64_t *pml4_page,
+void mmu_modify_or_del(uint64_t *pml4_page,
 		uint64_t vaddr_base, uint64_t size,
 		uint64_t prot_set, uint64_t prot_clr,
 		enum _page_table_type ptt, uint32_t type);
@@ -130,11 +130,11 @@ void destroy_ept(struct vm *vm);
 uint64_t gpa2hpa(struct vm *vm, uint64_t gpa);
 uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size);
 uint64_t hpa2gpa(struct vm *vm, uint64_t hpa);
-int ept_mr_add(struct vm *vm, uint64_t *pml4_page, uint64_t hpa,
+void ept_mr_add(struct vm *vm, uint64_t *pml4_page, uint64_t hpa,
 		uint64_t gpa, uint64_t size, uint64_t prot_orig);
-int ept_mr_modify(struct vm *vm, uint64_t *pml4_page, uint64_t gpa,
+void ept_mr_modify(struct vm *vm, uint64_t *pml4_page, uint64_t gpa,
 		uint64_t size, uint64_t prot_set, uint64_t prot_clr);
-int ept_mr_del(struct vm *vm, uint64_t *pml4_page, uint64_t gpa,
+void ept_mr_del(struct vm *vm, uint64_t *pml4_page, uint64_t gpa,
 		uint64_t size);
 void free_ept_mem(uint64_t *pml4_page);
 int ept_violation_vmexit_handler(struct vcpu *vcpu);