mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-07-16 08:26:41 +00:00
hv: mmu: reimplement mmu_add to add page table mapping
The ept_mr_add still uses the old map_mem. The old API will be kept for a period until the same refinement is merged for the SOS. Signed-off-by: Li, Fei1 <fei1.li@intel.com>
This commit is contained in:
parent
c7799584dc
commit
e2516fa6a0
@ -509,6 +509,9 @@ int ept_mr_add(struct vm *vm, uint64_t hpa_arg,
|
||||
if ((prot & IA32E_EPT_MT_MASK) != IA32E_EPT_UNCACHED) {
|
||||
prot |= IA32E_EPT_SNOOP_CTRL;
|
||||
}
|
||||
/* TODO: replace map_mem with mmu_add once SOS has add
|
||||
* HC_VM_WRITE_PROTECT_PAGE support.
|
||||
*/
|
||||
map_mem(&map_params, (void *)hpa,
|
||||
(void *)gpa, size, prot);
|
||||
|
||||
|
@ -523,13 +523,10 @@ void enable_smep(void)
|
||||
|
||||
void init_paging(void)
|
||||
{
|
||||
struct map_params map_params;
|
||||
struct e820_entry *entry;
|
||||
uint64_t hv_hpa;
|
||||
uint32_t i;
|
||||
int attr_uc = (MMU_MEM_ATTR_BIT_READ_WRITE |
|
||||
MMU_MEM_ATTR_BIT_USER_ACCESSIBLE |
|
||||
MMU_MEM_ATTR_TYPE_UNCACHED);
|
||||
uint64_t attr_uc = (PAGE_TABLE | PAGE_CACHE_UC);
|
||||
|
||||
pr_dbg("HV MMU Initialization");
|
||||
|
||||
@ -539,15 +536,10 @@ void init_paging(void)
|
||||
init_e820();
|
||||
obtain_e820_mem_info();
|
||||
|
||||
/* Loop through all memory regions in the e820 table */
|
||||
map_params.page_table_type = PTT_HOST;
|
||||
map_params.pml4_base = mmu_pml4_addr;
|
||||
|
||||
/* Map all memory regions to UC attribute */
|
||||
map_mem(&map_params, (void *)e820_mem.mem_bottom,
|
||||
(void *)e820_mem.mem_bottom,
|
||||
(e820_mem.mem_top - e820_mem.mem_bottom),
|
||||
attr_uc);
|
||||
mmu_add((uint64_t *)mmu_pml4_addr, e820_mem.mem_bottom,
|
||||
e820_mem.mem_bottom, e820_mem.mem_top - e820_mem.mem_bottom,
|
||||
attr_uc, PTT_HOST);
|
||||
|
||||
/* Modify WB attribute for E820_TYPE_RAM */
|
||||
for (i = 0U; i < e820_entries; i++) {
|
||||
|
@ -44,12 +44,12 @@ static int split_large_page(uint64_t *pte,
|
||||
|
||||
paddr = ref_paddr;
|
||||
for (i = 0UL; i < PTRS_PER_PTE; i++) {
|
||||
set_pte(pbase + i, paddr | ref_prot);
|
||||
set_pgentry(pbase + i, paddr | ref_prot);
|
||||
paddr += paddrinc;
|
||||
}
|
||||
|
||||
ref_prot = (ptt == PTT_HOST) ? PAGE_TABLE : EPT_RWX;
|
||||
set_pte(pte, HVA2HPA((void *)pbase) | ref_prot);
|
||||
set_pgentry(pte, HVA2HPA((void *)pbase) | ref_prot);
|
||||
|
||||
/* TODO: flush the TLB */
|
||||
|
||||
@ -63,12 +63,28 @@ static inline void __modify_or_del_pte(uint64_t *pte,
|
||||
uint64_t new_pte = *pte;
|
||||
new_pte &= ~prot_clr;
|
||||
new_pte |= prot_set;
|
||||
set_pte(pte, new_pte);
|
||||
set_pgentry(pte, new_pte);
|
||||
} else {
|
||||
set_pte(pte, 0);
|
||||
set_pgentry(pte, 0);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* pgentry may means pml4e/pdpte/pde
|
||||
*/
|
||||
static inline int construct_pgentry(enum _page_table_type ptt, uint64_t *pde)
|
||||
{
|
||||
uint64_t prot;
|
||||
void *pd_page = alloc_paging_struct();
|
||||
if (pd_page == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
prot = (ptt == PTT_HOST) ? PAGE_TABLE: EPT_RWX;
|
||||
set_pgentry(pde, HVA2HPA(pd_page) | prot);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* In PT level,
|
||||
* type: MR_MODIFY
|
||||
@ -264,3 +280,175 @@ int mmu_modify_or_del(uint64_t *pml4_page,
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* In PT level,
|
||||
* add [vaddr_start, vaddr_end) to [paddr_base, ...) MT PT mapping
|
||||
*/
|
||||
static int add_pte(uint64_t *pde, uint64_t paddr_start,
|
||||
uint64_t vaddr_start, uint64_t vaddr_end,
|
||||
uint64_t prot, enum _page_table_type ptt)
|
||||
{
|
||||
uint64_t *pt_page = pde_page_vaddr(*pde);
|
||||
uint64_t vaddr = vaddr_start;
|
||||
uint64_t paddr = paddr_start;
|
||||
uint64_t index = pte_index(vaddr);
|
||||
|
||||
dev_dbg(ACRN_DBG_MMU, "%s, paddr: 0x%llx, vaddr: [0x%llx - 0x%llx]\n",
|
||||
__func__, paddr, vaddr_start, vaddr_end);
|
||||
for (; index < PTRS_PER_PTE; index++) {
|
||||
uint64_t *pte = pt_page + index;
|
||||
|
||||
if (pgentry_present(ptt, *pte) != 0UL) {
|
||||
pr_err("%s, invalid op, pte present\n", __func__);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
set_pgentry(pte, paddr | prot);
|
||||
paddr += PTE_SIZE;
|
||||
vaddr += PTE_SIZE;
|
||||
if (vaddr >= vaddr_end)
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* In PD level,
|
||||
* add [vaddr_start, vaddr_end) to [paddr_base, ...) MT PT mapping
|
||||
*/
|
||||
static int add_pde(uint64_t *pdpte, uint64_t paddr_start,
|
||||
uint64_t vaddr_start, uint64_t vaddr_end,
|
||||
uint64_t prot, enum _page_table_type ptt)
|
||||
{
|
||||
int ret = 0;
|
||||
uint64_t *pd_page = pdpte_page_vaddr(*pdpte);
|
||||
uint64_t vaddr = vaddr_start;
|
||||
uint64_t paddr = paddr_start;
|
||||
uint64_t index = pde_index(vaddr);
|
||||
|
||||
dev_dbg(ACRN_DBG_MMU, "%s, paddr: 0x%llx, vaddr: [0x%llx - 0x%llx]\n",
|
||||
__func__, paddr, vaddr, vaddr_end);
|
||||
for (; index < PTRS_PER_PDE; index++) {
|
||||
uint64_t *pde = pd_page + index;
|
||||
uint64_t vaddr_next = (vaddr & PDE_MASK) + PDE_SIZE;
|
||||
|
||||
if (pgentry_present(ptt, *pde) == 0UL) {
|
||||
if (MEM_ALIGNED_CHECK(paddr, PDE_SIZE) &&
|
||||
MEM_ALIGNED_CHECK(vaddr, PDE_SIZE) &&
|
||||
(vaddr_next <= vaddr_end)) {
|
||||
set_pgentry(pde, paddr | (prot | PAGE_PSE));
|
||||
if (vaddr_next < vaddr_end) {
|
||||
paddr += (vaddr_next - vaddr);
|
||||
vaddr = vaddr_next;
|
||||
continue;
|
||||
}
|
||||
} else {
|
||||
ret = construct_pgentry(ptt, pde);
|
||||
if (ret != 0) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
ret = add_pte(pde, paddr, vaddr, vaddr_end, prot, ptt);
|
||||
if (ret != 0 || (vaddr_next >= vaddr_end)) {
|
||||
return ret;
|
||||
}
|
||||
paddr += (vaddr_next - vaddr);
|
||||
vaddr = vaddr_next;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* In PDPT level,
|
||||
* add [vaddr_start, vaddr_end) to [paddr_base, ...) MT PT mapping
|
||||
*/
|
||||
static int add_pdpte(uint64_t *pml4e, uint64_t paddr_start,
|
||||
uint64_t vaddr_start, uint64_t vaddr_end,
|
||||
uint64_t prot, enum _page_table_type ptt)
|
||||
{
|
||||
int ret = 0;
|
||||
uint64_t *pdpt_page = pml4e_page_vaddr(*pml4e);
|
||||
uint64_t vaddr = vaddr_start;
|
||||
uint64_t paddr = paddr_start;
|
||||
uint64_t index = pdpte_index(vaddr);
|
||||
|
||||
dev_dbg(ACRN_DBG_MMU, "%s, paddr: 0x%llx, vaddr: [0x%llx - 0x%llx]\n",
|
||||
__func__, paddr, vaddr, vaddr_end);
|
||||
for (; index < PTRS_PER_PDPTE; index++) {
|
||||
uint64_t *pdpte = pdpt_page + index;
|
||||
uint64_t vaddr_next = (vaddr & PDPTE_MASK) + PDPTE_SIZE;
|
||||
|
||||
if (pgentry_present(ptt, *pdpte) == 0UL) {
|
||||
if (MEM_ALIGNED_CHECK(paddr, PDPTE_SIZE) &&
|
||||
MEM_ALIGNED_CHECK(vaddr, PDPTE_SIZE) &&
|
||||
(vaddr_next <= vaddr_end)) {
|
||||
set_pgentry(pdpte, paddr | (prot | PAGE_PSE));
|
||||
if (vaddr_next < vaddr_end) {
|
||||
paddr += (vaddr_next - vaddr);
|
||||
vaddr = vaddr_next;
|
||||
continue;
|
||||
}
|
||||
return 0;
|
||||
} else {
|
||||
ret = construct_pgentry(ptt, pdpte);
|
||||
if (ret != 0) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
ret = add_pde(pdpte, paddr, vaddr, vaddr_end, prot, ptt);
|
||||
if (ret != 0 || (vaddr_next >= vaddr_end)) {
|
||||
return ret;
|
||||
}
|
||||
paddr += (vaddr_next - vaddr);
|
||||
vaddr = vaddr_next;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* action: MR_ADD
|
||||
* add [vaddr_base, vaddr_base + size ) memory region page table mapping.
|
||||
* @pre: the prot should set before call this function.
|
||||
*/
|
||||
int mmu_add(uint64_t *pml4_page, uint64_t paddr_base,
|
||||
uint64_t vaddr_base, uint64_t size,
|
||||
uint64_t prot, enum _page_table_type ptt)
|
||||
{
|
||||
uint64_t vaddr, vaddr_next, vaddr_end;
|
||||
uint64_t paddr;
|
||||
uint64_t *pml4e;
|
||||
int ret;
|
||||
|
||||
dev_dbg(ACRN_DBG_MMU, "%s, paddr 0x%llx, vaddr 0x%llx, size 0x%llx\n",
|
||||
__func__, paddr_base, vaddr_base, size);
|
||||
|
||||
/* align address to page size*/
|
||||
vaddr = ROUND_PAGE_UP(vaddr_base);
|
||||
paddr = ROUND_PAGE_UP(paddr_base);
|
||||
vaddr_end = vaddr + ROUND_PAGE_DOWN(size);
|
||||
|
||||
for (; vaddr < vaddr_end; vaddr = vaddr_next) {
|
||||
vaddr_next = (vaddr & PML4E_MASK) + PML4E_SIZE;
|
||||
pml4e = pml4e_offset(pml4_page, vaddr);
|
||||
if (pgentry_present(ptt, *pml4e) == 0UL) {
|
||||
ret = construct_pgentry(ptt, pml4e);
|
||||
if (ret != 0) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
ret = add_pdpte(pml4e, paddr, vaddr, vaddr_end, prot, ptt);
|
||||
if (ret != 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
paddr += (vaddr_next - vaddr);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -324,6 +324,9 @@ void enable_smep(void);
|
||||
void init_paging(void);
|
||||
int map_mem(struct map_params *map_params, void *paddr, void *vaddr,
|
||||
uint64_t size, uint32_t flags);
|
||||
int mmu_add(uint64_t *pml4_page, uint64_t paddr_base,
|
||||
uint64_t vaddr_base, uint64_t size,
|
||||
uint64_t prot, enum _page_table_type ptt);
|
||||
int mmu_modify_or_del(uint64_t *pml4_page,
|
||||
uint64_t vaddr_base, uint64_t size,
|
||||
uint64_t prot_set, uint64_t prot_clr,
|
||||
|
@ -68,12 +68,18 @@ static inline uint64_t *pte_offset(uint64_t *pde, uint64_t addr)
|
||||
return pde_page_vaddr(*pde) + pte_index(addr);
|
||||
}
|
||||
|
||||
static inline uint64_t get_pte(uint64_t *pte)
|
||||
/*
|
||||
* pgentry may means pml4e/pdpte/pde/pte
|
||||
*/
|
||||
static inline uint64_t get_pgentry(uint64_t *pte)
|
||||
{
|
||||
return *pte;
|
||||
}
|
||||
|
||||
static inline void set_pte(uint64_t *ptep, uint64_t pte)
|
||||
/*
|
||||
* pgentry may means pml4e/pdpte/pde/pte
|
||||
*/
|
||||
static inline void set_pgentry(uint64_t *ptep, uint64_t pte)
|
||||
{
|
||||
*ptep = pte;
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user