hv: multiarch: move page table entry function

This patch moves the xx_offset and xx_index page table entry helpers to
common code, and adds the arch interfaces arch_pgtl_page_paddr and
arch_pgtl_large.

Tracked-On: #8831
Signed-off-by: hangliu1 <hang1.liu@intel.com>
Reviewed-by: Liu, Yifan1 <yifan1.liu@intel.com>
Acked-by: Wang, Yu1 <yu1.wang@intel.com>
This commit is contained in:
hangliu1
2025-10-13 15:17:19 +08:00
committed by acrnsi-robot
parent 8be20c690b
commit 5cc6694eab
11 changed files with 216 additions and 232 deletions

View File

@@ -9,7 +9,7 @@
#include <mmu.h>
#include <vm.h>
#include <asm/guest/virq.h>
#include <asm/pgtable.h>
#include <pgtable.h>
#include <asm/guest/ept.h>
#include <asm/vmx.h>
#include <asm/vtd.h>
@@ -444,30 +444,30 @@ void walk_ept_table(struct acrn_vm *vm, pge_handler cb)
uint64_t i, j, k, m;
for (i = 0UL; i < PTRS_PER_PGTL3E; i++) {
pml4e = pml4e_offset((uint64_t *)get_eptp(vm), i << PML4E_SHIFT);
pml4e = pgtl3e_offset((uint64_t *)get_eptp(vm), i << PML4E_SHIFT);
if (!table->pgentry_present(*pml4e)) {
continue;
}
for (j = 0UL; j < PTRS_PER_PGTL2E; j++) {
pdpte = pdpte_offset(pml4e, j << PDPTE_SHIFT);
pdpte = pgtl2e_offset(pml4e, j << PDPTE_SHIFT);
if (!table->pgentry_present(*pdpte)) {
continue;
}
if (pdpte_large(*pdpte) != 0UL) {
if (is_pgtl_large(*pdpte) != 0UL) {
cb(pdpte, PGTL2_SIZE);
continue;
}
for (k = 0UL; k < PTRS_PER_PGTL1E; k++) {
pde = pde_offset(pdpte, k << PDE_SHIFT);
pde = pgtl1e_offset(pdpte, k << PDE_SHIFT);
if (!table->pgentry_present(*pde)) {
continue;
}
if (pde_large(*pde) != 0UL) {
if (is_pgtl_large(*pde) != 0UL) {
cb(pde, PGTL1_SIZE);
continue;
}
for (m = 0UL; m < PTRS_PER_PGTL0E; m++) {
pte = pte_offset(pde, m << PTE_SHIFT);
pte = pgtl0e_offset(pde, m << PTE_SHIFT);
if (table->pgentry_present(*pte)) {
cb(pte, PGTL0_SIZE);
}

View File

@@ -14,6 +14,7 @@
#include <asm/mmu.h>
#include <asm/guest/ept.h>
#include <logmsg.h>
#include <pgtable.h>
struct page_walk_info {
uint64_t top_entry; /* Top level paging structure entry */

View File

@@ -16,6 +16,7 @@
#include <reloc.h>
#include <hypercall.h>
#include <logmsg.h>
#include <pgtable.h>
int is_tee_vm(struct acrn_vm *vm)
{

View File

@@ -13,6 +13,7 @@
#include <logmsg.h>
#include <asm/rtcm.h>
#include <ptdev.h>
#include <pgtable.h>
#define ENTRY_GPA_L 2U
#define ENTRY_GPA_HI 8U

View File

@@ -79,18 +79,18 @@ static void free_sept_table(uint64_t *shadow_eptp)
if (shadow_eptp) {
for (i = 0UL; i < PTRS_PER_PML4E; i++) {
shadow_pml4e = pml4e_offset(shadow_eptp, i << PML4E_SHIFT);
shadow_pml4e = pgtl3e_offset(shadow_eptp, i << PML4E_SHIFT);
if (!is_present_ept_entry(*shadow_pml4e)) {
continue;
}
for (j = 0UL; j < PTRS_PER_PDPTE; j++) {
shadow_pdpte = pdpte_offset(shadow_pml4e, j << PDPTE_SHIFT);
shadow_pdpte = pgtl2e_offset(shadow_pml4e, j << PDPTE_SHIFT);
if (!is_present_ept_entry(*shadow_pdpte) ||
is_leaf_ept_entry(*shadow_pdpte, PGT_LVL2)) {
continue;
}
for (k = 0UL; k < PTRS_PER_PDE; k++) {
shadow_pde = pde_offset(shadow_pdpte, k << PDE_SHIFT);
shadow_pde = pgtl1e_offset(shadow_pdpte, k << PDE_SHIFT);
if (!is_present_ept_entry(*shadow_pde) ||
is_leaf_ept_entry(*shadow_pde, PGT_LVL1)) {
continue;

View File

@@ -29,7 +29,7 @@
#include <types.h>
#include <atomic.h>
#include <asm/cpufeatures.h>
#include <asm/pgtable.h>
#include <pgtable.h>
#include <asm/cpu_caps.h>
#include <asm/vmx.h>
#include <reloc.h>

View File

@@ -0,0 +1,32 @@
#include <asm/mmu.h>
#include <asm/pgtable.h>
/**
 * @brief Get the host physical address of the page frame referenced by a page table entry.
 *
 * Masks off the attribute/flag bits of the entry, keeping only the page frame
 * number bits selected by PFN_MASK.
 *
 * @param[in] pgtle The page table entry to decode.
 *
 * @return The physical address of the page referenced by the entry.
 *
 * @pre N/A
 *
 * @post N/A
 */
uint64_t arch_pgtl_page_paddr(uint64_t pgtle)
{
return pgtle & PFN_MASK;
}
/**
 * @brief Check whether the specified page table entry is pointing to a huge page.
 *
 * The PS (Page Size) flag indicates whether the entry maps a 1-GByte or 2-MByte page or references a
 * next-level page table. This function checks this flag. This function is typically used in the context of
 * setting up or modifying page tables where it's necessary to distinguish between large and regular page
 * mappings.
 *
 * It returns a value with bit 7 set if the specified entry maps a 1-GByte or 2-MByte page, and 0 if it
 * references a next-level page table.
 *
 * @param[in] pgtle The page table entry to check.
 *
 * @return The value of the PS flag in the entry.
 *
 * @retval PAGE_PSE indicating mapping to a 1-GByte or 2-MByte page.
 * @retval 0 indicating reference to a next-level page table.
 *
 * @pre N/A
 *
 * @post N/A
 */
uint64_t arch_pgtl_large(uint64_t pgtle)
{
return pgtle & PAGE_PSE;
}

View File

@@ -198,20 +198,21 @@ static inline void construct_pgentry(uint64_t *pte, void *pg_page, uint64_t prot
static void modify_or_del_pgtl0(uint64_t *pgtl1e, uint64_t vaddr_start, uint64_t vaddr_end,
uint64_t prot_set, uint64_t prot_clr, const struct pgtable *table, uint32_t type)
{
uint64_t *pgtl0_page = pde_page_vaddr(*pgtl1e);
uint64_t *pgtl0_page = page_addr(*pgtl1e);
uint64_t vaddr = vaddr_start;
uint64_t index = pte_index(vaddr);
uint64_t index = pgtl0e_index(vaddr);
dev_dbg(DBG_LEVEL_MMU, "%s, vaddr: [0x%lx - 0x%lx]\n", __func__, vaddr, vaddr_end);
for (; index < PTRS_PER_PGTL0E; index++) {
uint64_t *pgtl0e = pgtl0_page + index;
if (!table->pgentry_present(*pgtl0e)) {
/*suppress warning message for low memory (< 1MBytes),as service VM
* will update MTTR attributes for this region by default whether it
* is present or not.
/* FIXME: For x86, we need to suppress the warning message for low memory (< 1MByte),
 * as the service VM will update MTRR attributes for this region by default,
 * whether it is present or not. If the workaround is added in update_ept_mem_type(),
 * the warning no longer needs to be suppressed here.
 */
if ((type == MR_MODIFY) && (vaddr >= MEM_1M)) {
if (type == MR_MODIFY) {
pr_warn("%s, vaddr: 0x%lx pgtl0e is not present.\n", __func__, vaddr);
}
} else {
@@ -237,9 +238,9 @@ static void modify_or_del_pgtl0(uint64_t *pgtl1e, uint64_t vaddr_start, uint64_t
static void modify_or_del_pgtl1(uint64_t *pgtl2e, uint64_t vaddr_start, uint64_t vaddr_end,
uint64_t prot_set, uint64_t prot_clr, const struct pgtable *table, uint32_t type)
{
uint64_t *pgtl1_page = pdpte_page_vaddr(*pgtl2e);
uint64_t *pgtl1_page = page_addr(*pgtl2e);
uint64_t vaddr = vaddr_start;
uint64_t index = pde_index(vaddr);
uint64_t index = pgtl1e_index(vaddr);
dev_dbg(DBG_LEVEL_MMU, "%s, vaddr: [0x%lx - 0x%lx]\n", __func__, vaddr, vaddr_end);
for (; index < PTRS_PER_PGTL1E; index++) {
@@ -251,7 +252,7 @@ static void modify_or_del_pgtl1(uint64_t *pgtl2e, uint64_t vaddr_start, uint64_t
pr_warn("%s, addr: 0x%lx pgtl1e is not present.\n", __func__, vaddr);
}
} else {
if (pde_large(*pgtl1e) != 0UL) {
if (is_pgtl_large(*pgtl1e) != 0UL) {
if ((vaddr_next > vaddr_end) || (!mem_aligned_check(vaddr, PGTL1_SIZE))) {
split_large_page(pgtl1e, PGT_LVL1, vaddr, table);
} else {
@@ -284,9 +285,9 @@ static void modify_or_del_pgtl1(uint64_t *pgtl2e, uint64_t vaddr_start, uint64_t
static void modify_or_del_pgtl2(const uint64_t *pgtl3e, uint64_t vaddr_start, uint64_t vaddr_end,
uint64_t prot_set, uint64_t prot_clr, const struct pgtable *table, uint32_t type)
{
uint64_t *pgtl2_page = pml4e_page_vaddr(*pgtl3e);
uint64_t *pgtl2_page = page_addr(*pgtl3e);
uint64_t vaddr = vaddr_start;
uint64_t index = pdpte_index(vaddr);
uint64_t index = pgtl2e_index(vaddr);
dev_dbg(DBG_LEVEL_MMU, "%s, vaddr: [0x%lx - 0x%lx]\n", __func__, vaddr, vaddr_end);
for (; index < PTRS_PER_PGTL2E; index++) {
@@ -298,7 +299,7 @@ static void modify_or_del_pgtl2(const uint64_t *pgtl3e, uint64_t vaddr_start, ui
pr_warn("%s, vaddr: 0x%lx pgtl2e is not present.\n", __func__, vaddr);
}
} else {
if (pdpte_large(*pgtl2e) != 0UL) {
if (is_pgtl_large(*pgtl2e) != 0UL) {
if ((vaddr_next > vaddr_end) ||
(!mem_aligned_check(vaddr, PGTL2_SIZE))) {
split_large_page(pgtl2e, PGT_LVL2, vaddr, table);
@@ -390,7 +391,7 @@ void pgtable_modify_or_del_map(uint64_t *pgtl3_page, uint64_t vaddr_base, uint64
while (vaddr < vaddr_end) {
vaddr_next = (vaddr & PGTL3_MASK) + PGTL3_SIZE;
pgtl3e = pml4e_offset(pgtl3_page, vaddr);
pgtl3e = pgtl3e_offset(pgtl3_page, vaddr);
if ((!table->pgentry_present(*pgtl3e)) && (type == MR_MODIFY)) {
ASSERT(false, "invalid op, pgtl3e not present");
} else {
@@ -407,10 +408,10 @@ void pgtable_modify_or_del_map(uint64_t *pgtl3_page, uint64_t vaddr_base, uint64
static void add_pgtl0(const uint64_t *pgtl1e, uint64_t paddr_start, uint64_t vaddr_start, uint64_t vaddr_end,
uint64_t prot, const struct pgtable *table)
{
uint64_t *pgtl0_page = pde_page_vaddr(*pgtl1e);
uint64_t *pgtl0_page = page_addr(*pgtl1e);
uint64_t vaddr = vaddr_start;
uint64_t paddr = paddr_start;
uint64_t index = pte_index(vaddr);
uint64_t index = pgtl0e_index(vaddr);
dev_dbg(DBG_LEVEL_MMU, "%s, paddr: 0x%lx, vaddr: [0x%lx - 0x%lx]\n",
__func__, paddr, vaddr_start, vaddr_end);
@@ -438,10 +439,10 @@ static void add_pgtl0(const uint64_t *pgtl1e, uint64_t paddr_start, uint64_t vad
static void add_pgtl1(const uint64_t *pgtl2e, uint64_t paddr_start, uint64_t vaddr_start, uint64_t vaddr_end,
uint64_t prot, const struct pgtable *table)
{
uint64_t *pgtl1_page = pdpte_page_vaddr(*pgtl2e);
uint64_t *pgtl1_page = page_addr(*pgtl2e);
uint64_t vaddr = vaddr_start;
uint64_t paddr = paddr_start;
uint64_t index = pde_index(vaddr);
uint64_t index = pgtl1e_index(vaddr);
uint64_t local_prot = prot;
dev_dbg(DBG_LEVEL_MMU, "%s, paddr: 0x%lx, vaddr: [0x%lx - 0x%lx]\n",
@@ -450,7 +451,7 @@ static void add_pgtl1(const uint64_t *pgtl2e, uint64_t paddr_start, uint64_t vad
uint64_t *pgtl1e = pgtl1_page + index;
uint64_t vaddr_next = (vaddr & PGTL1_MASK) + PGTL1_SIZE;
if (pde_large(*pgtl1e) != 0UL) {
if (is_pgtl_large(*pgtl1e) != 0UL) {
pr_fatal("%s, pgtl1e 0x%lx is already present!\n", __func__, vaddr);
} else {
if (!table->pgentry_present(*pgtl1e)) {
@@ -488,10 +489,10 @@ static void add_pgtl1(const uint64_t *pgtl2e, uint64_t paddr_start, uint64_t vad
static void add_pgtl2(const uint64_t *pgtl3e, uint64_t paddr_start, uint64_t vaddr_start, uint64_t vaddr_end,
uint64_t prot, const struct pgtable *table)
{
uint64_t *pgtl2_page = pml4e_page_vaddr(*pgtl3e);
uint64_t *pgtl2_page = page_addr(*pgtl3e);
uint64_t vaddr = vaddr_start;
uint64_t paddr = paddr_start;
uint64_t index = pdpte_index(vaddr);
uint64_t index = pgtl2e_index(vaddr);
uint64_t local_prot = prot;
dev_dbg(DBG_LEVEL_MMU, "%s, paddr: 0x%lx, vaddr: [0x%lx - 0x%lx]\n", __func__, paddr, vaddr, vaddr_end);
@@ -499,7 +500,7 @@ static void add_pgtl2(const uint64_t *pgtl3e, uint64_t paddr_start, uint64_t vad
uint64_t *pgtl2e = pgtl2_page + index;
uint64_t vaddr_next = (vaddr & PGTL2_MASK) + PGTL2_SIZE;
if (pdpte_large(*pgtl2e) != 0UL) {
if (is_pgtl_large(*pgtl2e) != 0UL) {
pr_fatal("%s, pgtl2e 0x%lx is already present!\n", __func__, vaddr);
} else {
if (!table->pgentry_present(*pgtl2e)) {
@@ -587,7 +588,7 @@ void pgtable_add_map(uint64_t *pgtl3_page, uint64_t paddr_base, uint64_t vaddr_b
while (vaddr < vaddr_end) {
vaddr_next = (vaddr & PGTL3_MASK) + PGTL3_SIZE;
pgtl3e = pml4e_offset(pgtl3_page, vaddr);
pgtl3e = pgtl3e_offset(pgtl3_page, vaddr);
if (!table->pgentry_present(*pgtl3e)) {
void *pgtl2_page = alloc_page(table->pool);
construct_pgentry(pgtl3e, pgtl2_page, table->get_default_access_right(), table);
@@ -679,8 +680,8 @@ void *pgtable_create_trusty_root(const struct pgtable *table,
* copy PTPDEs from normal world EPT to secure world EPT,
* and remove execute access attribute in these entries
*/
dest_pdpte_p = pml4e_page_vaddr(sworld_pml4e);
src_pdpte_p = pml4e_page_vaddr(nworld_pml4e);
dest_pdpte_p = page_addr(sworld_pml4e);
src_pdpte_p = page_addr(nworld_pml4e);
for (i = 0U; i < (uint16_t)(PTRS_PER_PGTL2E - 1UL); i++) {
pdpte = get_pgentry(src_pdpte_p);
if ((pdpte & prot_table_present) != 0UL) {
@@ -735,25 +736,25 @@ const uint64_t *pgtable_lookup_entry(uint64_t *pgtl3_page, uint64_t addr, uint64
bool present = true;
uint64_t *pgtl3e, *pgtl2e, *pgtl1e, *pgtl0e;
pgtl3e = pml4e_offset(pgtl3_page, addr);
pgtl3e = pgtl3e_offset(pgtl3_page, addr);
present = table->pgentry_present(*pgtl3e);
if (present) {
pgtl2e = pdpte_offset(pgtl3e, addr);
pgtl2e = pgtl2e_offset(pgtl3e, addr);
present = table->pgentry_present(*pgtl2e);
if (present) {
if (pdpte_large(*pgtl2e) != 0UL) {
if (is_pgtl_large(*pgtl2e) != 0UL) {
*pg_size = PGTL2_SIZE;
pret = pgtl2e;
} else {
pgtl1e = pde_offset(pgtl2e, addr);
pgtl1e = pgtl1e_offset(pgtl2e, addr);
present = table->pgentry_present(*pgtl1e);
if (present) {
if (pde_large(*pgtl1e) != 0UL) {
if (is_pgtl_large(*pgtl1e) != 0UL) {
*pg_size = PGTL1_SIZE;
pret = pgtl1e;
} else {
pgtl0e = pte_offset(pgtl1e, addr);
pgtl0e = pgtl0e_offset(pgtl1e, addr);
present = table->pgentry_present(*pgtl0e);
if (present) {
*pg_size = PGTL0_SIZE;

View File

@@ -134,18 +134,18 @@ static void early_pgtable_map_uart(uint64_t addr)
CPU_CR_READ(cr3, &value);
/* assumption for mapping high MMIO in the early page table: only 2MB pages are
used, since 1GB pages may not be available when the memory width is 39 bits */
pml4e = pml4e_offset((uint64_t *)value, addr);
pml4e = pgtl3e_offset((uint64_t *)value, addr);
/* address is above 512G */
if(!(*pml4e & PAGE_PRESENT)) {
*pml4e = hva2hpa_early(uart_pdpte_page) + (PAGE_PRESENT|PAGE_RW);
}
pdpte = pdpte_offset(pml4e, addr);
pdpte = pgtl2e_offset(pml4e, addr);
if(!(*pdpte & PAGE_PRESENT)) {
*(pdpte) = hva2hpa_early(uart_pde_page) + (PAGE_PRESENT|PAGE_RW);
pde = pde_offset(pdpte, addr);
pde = pgtl1e_offset(pdpte, addr);
*pde = (addr & PGTL1_MASK) + (PAGE_PRESENT|PAGE_RW|PAGE_PSE);
} else if(!(*pdpte & PAGE_PSE)) {
pde = pde_offset(pdpte, addr);
pde = pgtl1e_offset(pdpte, addr);
if(!(*pde & PAGE_PRESENT)) {
*pde = (addr & PGTL1_MASK) + (PAGE_PRESENT|PAGE_RW|PAGE_PSE);
}

View File

@@ -7,7 +7,6 @@
#define PGTABLE_H
#include <asm/page.h>
#include <pgtable.h>
/**
* @addtogroup hwmgmt_page
@@ -195,192 +194,6 @@ static inline uint64_t hva2hpa_early(void *x)
return (uint64_t)x;
}
static inline uint64_t pml4e_index(uint64_t address)
{
return (address >> PML4E_SHIFT) & (PTRS_PER_PML4E - 1UL);
}
static inline uint64_t pdpte_index(uint64_t address)
{
return (address >> PDPTE_SHIFT) & (PTRS_PER_PDPTE - 1UL);
}
static inline uint64_t pde_index(uint64_t address)
{
return (address >> PDE_SHIFT) & (PTRS_PER_PDE - 1UL);
}
static inline uint64_t pte_index(uint64_t address)
{
return (address >> PTE_SHIFT) & (PTRS_PER_PTE - 1UL);
}
static inline uint64_t *pml4e_page_vaddr(uint64_t pml4e)
{
return hpa2hva(pml4e & PML4E_PFN_MASK);
}
static inline uint64_t *pdpte_page_vaddr(uint64_t pdpte)
{
return hpa2hva(pdpte & PDPTE_PFN_MASK);
}
static inline uint64_t *pde_page_vaddr(uint64_t pde)
{
return hpa2hva(pde & PDE_PFN_MASK);
}
/**
* @brief Calculate the page map level-4 table entry(PML4E) for a specified input address.
*
* The page map level-4 table(PML4T) contains 512 entries, each of which points to a page directory pointer table(PDPT).
* Address has the index to the PML4E in PML4T. This function is used to calculate the address of PML4E. It is typically
* used during the page translation process.
*
* It will return a pointer to the page map level-4 table entry(PML4E).
*
* @param[in] pml4_page A pointer to a page map level-4 table(PML4T) page.
* @param[in] addr The address value for which the page map level-4 table entry(PML4E) address is to be calculated.
* For hypervisor's MMU, it is the host virtual address.
* For each VM's EPT, it is the guest physical address.
*
* @return A pointer to the PML4E.
*
* @pre pml4_page != NULL
*
* @post N/A
*/
static inline uint64_t *pml4e_offset(uint64_t *pml4_page, uint64_t addr)
{
return pml4_page + pml4e_index(addr);
}
/**
* @brief Calculate the page directory pointer table entry(PDPTE) for a specified input address.
*
* The page directory pointer table(PDPT) is referenced by a page map level-4 table entry(PML4E) and each entry(PDPTE)
* in PDPT points to a page directory table(PDT). Address has the index to the PDPTE in PDPT. This function is used to
* calculate the address of PDPTE. It is typically used during the page translation process.
*
* It will return a pointer to the page directory pointer table entry(PDPTE).
*
* @param[in] pml4e A pointer to a page map level-4 table entry(PML4E).
* @param[in] addr The address for which the page directory pointer table entry(PDPTE) address is to be calculated.
* For hypervisor's MMU, it is the host virtual address.
* For each VM's EPT, it is the guest physical address.
*
* @return A pointer to the PDPTE.
*
* @pre pml4e != NULL
*
* @post N/A
*/
static inline uint64_t *pdpte_offset(const uint64_t *pml4e, uint64_t addr)
{
return pml4e_page_vaddr(*pml4e) + pdpte_index(addr);
}
/**
* @brief Calculate the page directory table entry(PDE) for a specified input address.
*
* The page directory table(PDT) is referenced by a page directory pointer table entry(PDPTE) and each entry(PDE) in PDT
* points to a page table(PT). Address has the index to the PDE in PDT. This function is used to calculate the address
* of PDE. It is typically used during the page translation process.
*
* It will return a pointer to the page directory table entry(PDE).
*
* @param[in] pdpte A pointer to a page directory pointer table entry(PDPTE).
* @param[in] addr The address for which the page directory table entry(PDE) address is to be calculated.
* For hypervisor's MMU, it is the host virtual address.
* For each VM's EPT, it is the guest physical address.
*
* @return A pointer to the PDE.
*
* @pre pdpte != NULL
*
* @post N/A
*/
static inline uint64_t *pde_offset(const uint64_t *pdpte, uint64_t addr)
{
return pdpte_page_vaddr(*pdpte) + pde_index(addr);
}
/**
* @brief Calculate the page table entry(PTE) for a specified input address.
*
* The page table entry(PTE) is the entry that maps a page. This function is used to calculate the address of the PTE.
* It is typically used during the page translation process. The function is essential for managing memory access
* permissions and for implementing memory systems.
*
* It will return the address of a page table entry(PTE).
*
* @param[in] pde A pointer to a page directory entry(PDE).
* @param[in] addr The address for which the page table entry(PTE) address is to be calculated.
* For hypervisor's MMU, it is the host virtual address.
* For each VM's EPT, it is the guest physical address.
*
* @return A pointer to the page table entry(PTE).
*
* @pre pde != NULL
*
* @post N/A
*/
static inline uint64_t *pte_offset(const uint64_t *pde, uint64_t addr)
{
return pde_page_vaddr(*pde) + pte_index(addr);
}
/**
* @brief Check whether the PS flag of the specified page directory table entry(PDE) is 1 or not.
*
* PS(Page Size) flag in PDE indicates whether maps a 2-MByte page or references a page table. This function checks this
* flag. This function is typically used in the context of setting up or modifying page tables where it's necessary to
* distinguish between large and regular page mappings.
*
* It returns the value that bit 7 is 1 if the specified PDE maps a 2-MByte page, or 0 if references a page table.
*
* @param[in] pde The page directory table entry(PDE) to check.
*
* @return The value of PS flag in the PDE.
*
* @retval PAGE_PSE indicating mapping to a 2-MByte page.
* @retval 0 indicating reference to a page table.
*
* @pre N/A
*
* @post N/A
*/
static inline uint64_t pde_large(uint64_t pde)
{
return pde & PAGE_PSE;
}
/**
* @brief Check whether the PS flag of the specified page directory pointer table entry(PDPTE) is 1 or not.
*
* PS(Page Size) flag in PDPTE indicates whether maps a 1-GByte page or references a page directory table. This function
* checks this flag. This function is typically used in the context of setting up or modifying page tables where it's
* necessary to distinguish between large and regular page mappings.
*
* It returns the value that bit 7 is 1 if the specified PDPTE maps a 1-GByte page, and 0 if references a page table.
*
* @param[in] pdpte The page directory pointer table entry(PDPTE) to check.
*
* @return The value of PS flag in the PDPTE.
*
* @retval PAGE_PSE indicating mapping to a 1-GByte page.
* @retval 0 indicating reference to a page directory table.
*
* @pre N/A
*
* @post N/A
*/
static inline uint64_t pdpte_large(uint64_t pdpte)
{
return pdpte & PAGE_PSE;
}
void *pgtable_create_trusty_root(const struct pgtable *table,
void *nworld_pml4_page, uint64_t prot_table_present, uint64_t prot_clr);

View File

@@ -8,6 +8,10 @@
#ifndef COMMON_PGTABLE_H
#define COMMON_PGTABLE_H
#include <asm/page.h>
#include <asm/mm_common.h>
uint64_t arch_pgtl_page_paddr(uint64_t pgtle);
uint64_t arch_pgtl_large(uint64_t pgtle);
/**
* @brief Translate a host physical address to a host virtual address.
@@ -66,4 +70,135 @@ static inline uint64_t round_page_down(uint64_t addr)
{
return (addr & PAGE_MASK);
}
/* Index of the PGT_LVL3 (top-level) table entry that translates the given address. */
static inline uint64_t pgtl3e_index(uint64_t address)
{
	uint64_t idx = address >> PGTL3_SHIFT;

	return idx & (PTRS_PER_PGTL3E - 1UL);
}
/* Index of the PGT_LVL2 table entry that translates the given address. */
static inline uint64_t pgtl2e_index(uint64_t address)
{
	/* spacing fixed to match the sibling index helpers */
	return (address >> PGTL2_SHIFT) & (PTRS_PER_PGTL2E - 1UL);
}
/* Index of the PGT_LVL1 table entry that translates the given address. */
static inline uint64_t pgtl1e_index(uint64_t address)
{
	uint64_t idx = address >> PGTL1_SHIFT;

	return idx & (PTRS_PER_PGTL1E - 1UL);
}
/* Index of the PGT_LVL0 (leaf) table entry that translates the given address. */
static inline uint64_t pgtl0e_index(uint64_t address)
{
	uint64_t idx = address >> PGTL0_SHIFT;

	return idx & (PTRS_PER_PGTL0E - 1UL);
}
/* Host virtual address of the page referenced by the given page table entry. */
static inline uint64_t *page_addr(uint64_t pgtle)
{
return hpa2hva(arch_pgtl_page_paddr(pgtle));
}
/* Non-zero (the PS bit) if the entry maps a large page, 0UL if it references a
 * next-level page table. Thin wrapper over the arch-specific check. */
static inline uint64_t is_pgtl_large(uint64_t pgtle)
{
return arch_pgtl_large(pgtle);
}
/**
 * @brief Calculate the page map PGT_LVL3 table entry for a specified input address.
 *
 * The page map PGT_LVL3 table contains 512 entries, each of which points to a PGT_LVL2 page table.
 * Address has the index to the PGT_LVL3 entry. This function is used to calculate the address of PGT_LVL3 entry.
 * It is typically used during the page translation process.
 *
 * It will return a pointer to the page map PGT_LVL3 table entry.
 *
 * @param[in] pgtl3_page A pointer to a PGT_LVL3 page.
 * @param[in] addr The address value for which the page map PGT_LVL3 table entry address is to be calculated.
 * For hypervisor's MMU, it is the host virtual address.
 * For each VM's stage 2 translation, it is the guest physical address.
 *
 * @return A pointer to the PGT_LVL3 entry.
 *
 * @pre pgtl3_page != NULL
 *
 * @post N/A
 */
static inline uint64_t *pgtl3e_offset(uint64_t *pgtl3_page, uint64_t addr)
{
return pgtl3_page + pgtl3e_index(addr);
}
/**
 * @brief Calculate the PGT_LVL2 page table entry for a specified input address.
 *
 * The PGT_LVL2 page table is referenced by a page map PGT_LVL3 table entry and each entry
 * in PGT_LVL2 points to a PGT_LVL1 page table. Address has the index to the PGT_LVL2 entry. This function is used to
 * calculate the address of PGT_LVL2 entry. It is typically used during the page translation process.
 *
 * It will return a pointer to the PGT_LVL2 page table entry.
 *
 * @param[in] pgtl3e A pointer to a PGT_LVL3 page map table entry.
 * @param[in] addr The address for which the PGT_LVL2 page table entry address is to be calculated.
 * For hypervisor's MMU, it is the host virtual address.
 * For each VM's stage 2 translation, it is the guest physical address.
 *
 * @return A pointer to the PGT_LVL2 entry.
 *
 * @pre pgtl3e != NULL
 *
 * @post N/A
 */
static inline uint64_t *pgtl2e_offset(const uint64_t *pgtl3e, uint64_t addr)
{
return page_addr(*pgtl3e) + pgtl2e_index(addr);
}
/**
 * @brief Calculate the PGT_LVL1 page table entry for a specified input address.
 *
 * The PGT_LVL1 page table is referenced by a PGT_LVL2 page table entry and each entry
 * points to a page table. Address has the index to the entry in the PGT_LVL1 page table. This function
 * is used to calculate the address of the PGT_LVL1 entry. It is typically used during the page translation process.
 *
 * It will return a pointer to the PGT_LVL1 page table entry.
 *
 * @param[in] pgtl2e A pointer to a PGT_LVL2 page table entry.
 * @param[in] addr The address for which the PGT_LVL1 page table entry address is to be calculated.
 * For hypervisor's MMU, it is the host virtual address.
 * For each VM's stage 2 translation, it is the guest physical address.
 *
 * @return A pointer to the PGT_LVL1 page table entry.
 *
 * @pre pgtl2e != NULL
 *
 * @post N/A
 */
static inline uint64_t *pgtl1e_offset(const uint64_t *pgtl2e, uint64_t addr)
{
return page_addr(*pgtl2e) + pgtl1e_index(addr);
}
/**
 * @brief Calculate the PGT_LVL0 page table entry for a specified input address.
 *
 * The PGT_LVL0 page table entry is the entry that maps a page. This function is used to calculate
 * the address of the PGT_LVL0 entry. It is typically used during the page translation process.
 * The function is essential for managing memory access permissions and for implementing memory systems.
 *
 * It will return the address of a PGT_LVL0 page table entry.
 *
 * @param[in] pgtl1e A pointer to a PGT_LVL1 page table entry.
 * @param[in] addr The address for which the PGT_LVL0 page table entry address is to be calculated.
 * For hypervisor's MMU, it is the host virtual address.
 * For each VM's stage 2 translation, it is the guest physical address.
 *
 * @return A pointer to the PGT_LVL0 page table entry.
 *
 * @pre pgtl1e != NULL
 *
 * @post N/A
 */
static inline uint64_t *pgtl0e_offset(const uint64_t *pgtl1e, uint64_t addr)
{
return page_addr(*pgtl1e) + pgtl0e_index(addr);
}
#endif /* COMMON_PGTABLE_H*/