doc: add module design for some defines in hwmgmt_page

GAI Tooling Notice: These contents may have been developed with support from one
or more generative artificial intelligence solutions.

The ACRN hypervisor is decomposed into a series of components and modules. Module
design in the hypervisor is documented by adding inline doxygen-style comments
above functions, macros, structures, etc.

This patch adds comments for some elements in the hwmgmt_page module.

Tracked-On: #8665

Signed-off-by: Haiwei Li <haiwei.li@intel.com>
Haiwei Li 2024-07-24 20:52:04 +08:00 committed by wenlingz
parent cb431d9df4
commit fa2b8fcfbe
5 changed files with 610 additions and 77 deletions

View File

@@ -1,6 +1,6 @@
/*-
* Copyright (c) 2011 NetApp, Inc.
* Copyright (c) 2017-2022 Intel Corporation.
* Copyright (c) 2017-2024 Intel Corporation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -41,6 +41,11 @@
static uint64_t hv_ram_size;
static void *ppt_mmu_pml4_addr;
/**
* @brief The sanitized page
*
* The sanitized page is used to mitigate L1TF (L1 Terminal Fault).
*/
static uint8_t sanitized_page[PAGE_SIZE] __aligned(PAGE_SIZE);
/* PPT VA and PA are identical mapping */

View File

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2018-2022 Intel Corporation.
* Copyright (C) 2018-2024 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,6 +8,19 @@
#include <asm/page.h>
#include <logmsg.h>
/**
* @addtogroup hwmgmt_page
*
* @{
*/
/**
* @file
* @brief Implementation of page management.
*
* This file provides the core functionality required for allocating and freeing memory pages. It is fundamental
* support for managing memory resources.
*/
struct page *alloc_page(struct page_pool *pool)
{
@@ -57,3 +70,7 @@ void free_page(struct page_pool *pool, struct page *page)
bitmap_clear_nolock(bit, pool->bitmap + idx);
spinlock_release(&pool->lock);
}
/**
* @}
*/
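As a usage illustration (not part of this patch), a caller typically draws pages from an initialized pool and returns them when done; the wrapper function here is hypothetical, while alloc_page/free_page are the APIs documented above:

```c
/* Hypothetical caller sketch: draw a 4-KByte page from an initialized
 * page pool, use it, and return it. Per the comments in page.h below,
 * alloc_page() falls back to the pool's dummy page when the pool is
 * exhausted. */
#include <asm/page.h>

static void page_pool_usage_example(struct page_pool *pool)
{
	struct page *pg = alloc_page(pool);

	/* pg->contents is a 4-KByte, 4-KByte-aligned buffer */

	free_page(pool, pg);
}
```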

View File

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2018-2022 Intel Corporation.
* Copyright (C) 2018-2024 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -11,8 +11,29 @@
#include <asm/mmu.h>
#include <logmsg.h>
/**
* @addtogroup hwmgmt_page
*
* @{
*/
/**
* @file
* @brief Implementation of page table management.
*
* This file implements the external APIs to establish, modify, delete, or look for the mapping information. It also
* defines some helper functions that implement the operations commonly used in this file.
*
*/
#define DBG_LEVEL_MMU 6U
/**
* @brief Host physical address of the sanitized page.
*
* The sanitized page is used to mitigate L1TF (L1 Terminal Fault). This variable is used to store the host physical
* address of the sanitized page.
*/
static uint64_t sanitized_page_hpa;
static void sanitize_pte_entry(uint64_t *ptep, const struct pgtable *table)
@@ -28,6 +49,26 @@ static void sanitize_pte(uint64_t *pt_page, const struct pgtable *table)
}
}
/**
* @brief Initializes a sanitized page.
*
* This function is responsible for initializing a sanitized page. It sets the page table entries in this sanitized page
* to point to the host physical address of the sanitized page itself.
*
* The static variable 'sanitized_page_hpa' is set to 'hpa', and the contents of 'sanitized_page' are initialized.
*
* @param[out] sanitized_page The page to be sanitized.
* @param[in] hpa The host physical address that the page table entries in the sanitized page will point to.
*
* @return None
*
* @pre sanitized_page != NULL
* @pre ((uint64_t)sanitized_page & (PAGE_SIZE - 1)) == 0x0U
* @pre hpa != 0U
* @pre (hpa & (PAGE_SIZE - 1)) == 0x0U
*
* @post N/A
*/
void init_sanitized_page(uint64_t *sanitized_page, uint64_t hpa)
{
uint64_t i;
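The diff truncates the function body here; per the doc comment above, a minimal sketch of how the function completes (a hedged reconstruction, not the verbatim ACRN source) is:

```c
	/* hedged continuation sketch: record the HPA, then point every
	 * 64-bit entry in the sanitized page at the page itself */
	sanitized_page_hpa = hpa;
	for (i = 0UL; i < (PAGE_SIZE / sizeof(uint64_t)); i++) {
		sanitized_page[i] = sanitized_page_hpa;
	}
}
```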
@@ -256,18 +297,62 @@ static void modify_or_del_pdpte(const uint64_t *pml4e, uint64_t vaddr_start, uin
}
}
/*
* type: MR_MODIFY
* modify [vaddr, vaddr + size ) memory type or page access right.
* prot_clr - memory type or page access right want to be clear
* prot_set - memory type or page access right want to be set
* @pre: the prot_set and prot_clr should set before call this function.
* If you just want to modify access rights, you can just set the prot_clr
* to what you want to set, prot_clr to what you want to clear. But if you
* want to modify the MT, you should set the prot_set to what MT you want
* to set, prot_clr to the MT mask.
* type: MR_DEL
* delete [vaddr_base, vaddr_base + size ) memory region page table mapping.
/**
* @brief Modify or delete the mappings associated with the specified address range.
*
* This function modifies the properties of an existing mapping or deletes it entirely from the page table. The input
* address range is specified by [vaddr_base, vaddr_base + size). It is used when changing the access permissions of a
* memory region or when freeing a previously mapped region. This operation is critical for dynamic memory management,
* allowing the system to adapt to changes in memory usage patterns or to reclaim resources.
*
* For error case behaviors:
* - If the 'type' is MR_MODIFY and any page referenced by the PML4E in the specified address range is not present, the
* function asserts that the operation is invalid.
* For normal case behaviors (when the error case conditions are not satisfied):
* - If any page referenced by the PDPTE/PDE/PTE in the specified address range is not present, the corresponding
* mapping is left unchanged and the operation continues.
* - If any PDPTE/PDE in the specified address range maps a large page and the large page extends beyond the specified
* address range, the function splits the large page into next-level pages to allow the modification or deletion of
* the mappings. The execute right is recovered by the callback function table->recover_exe_right() when a 2-MByte
* page is split into 4-KByte pages.
* - If the 'type' is MR_MODIFY, the function modifies the properties of the existing mapping to match the specified
* properties.
* - If the 'type' is MR_DEL, the function sets the corresponding page table entries to point to the sanitized page.
*
* @param[inout] pml4_page A pointer to the specified PML4 table.
* @param[in] vaddr_base The specified input address determining the start of the input address range whose mapping
* information is to be updated.
* For hypervisor's MMU, it is the host virtual address.
* For each VM's EPT, it is the guest physical address.
* @param[in] size The size of the specified input address range whose mapping information is to be updated.
* @param[in] prot_set Bit positions representing the specified properties which need to be set.
* Bits specified by prot_clr are cleared before each bit specified by prot_set is set to 1.
* @param[in] prot_clr Bit positions representing the specified properties which need to be cleared.
* Bits specified by prot_clr are cleared before each bit specified by prot_set is set to 1.
* @param[in] table A pointer to the struct pgtable containing the information of the specified memory operations.
* @param[in] type The type of operation to perform (MR_MODIFY or MR_DEL).
*
* @return None
*
* @pre pml4_page != NULL
* @pre table != NULL
* @pre (type == MR_MODIFY) || (type == MR_DEL)
* @pre For x86 hypervisor, the following conditions shall be met if "type == MR_MODIFY".
* - (prot_set & ~(PAGE_RW | PAGE_USER | PAGE_PWT | PAGE_PCD | PAGE_ACCESSED | PAGE_DIRTY | PAGE_PSE | PAGE_GLOBAL
* | PAGE_PAT_LARGE | PAGE_NX) == 0)
* - (prot_clr & ~(PAGE_RW | PAGE_USER | PAGE_PWT | PAGE_PCD | PAGE_ACCESSED | PAGE_DIRTY | PAGE_PSE | PAGE_GLOBAL
* | PAGE_PAT_LARGE | PAGE_NX) == 0)
* @pre For the VM EPT mappings, the following conditions shall be met if "type == MR_MODIFY".
* - (prot_set & ~(EPT_RD | EPT_WR | EPT_EXE | EPT_MT_MASK) == 0)
* - (prot_set & EPT_MT_MASK) == EPT_UNCACHED || (prot_set & EPT_MT_MASK) == EPT_WC ||
* (prot_set & EPT_MT_MASK) == EPT_WT || (prot_set & EPT_MT_MASK) == EPT_WP || (prot_set & EPT_MT_MASK) == EPT_WB
* - (prot_clr & ~(EPT_RD | EPT_WR | EPT_EXE | EPT_MT_MASK) == 0)
* - (prot_clr & EPT_MT_MASK) == EPT_UNCACHED || (prot_clr & EPT_MT_MASK) == EPT_WC ||
* (prot_clr & EPT_MT_MASK) == EPT_WT || (prot_clr & EPT_MT_MASK) == EPT_WP || (prot_clr & EPT_MT_MASK) == EPT_WB
*
* @post N/A
*
* @remark N/A
*/
void pgtable_modify_or_del_map(uint64_t *pml4_page, uint64_t vaddr_base, uint64_t size,
uint64_t prot_set, uint64_t prot_clr, const struct pgtable *table, uint32_t type)
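As a usage illustration of the two operation types (not part of this patch; the wrapper, ept_table, and gpa are hypothetical stand-ins, while EPT_WR, MR_MODIFY, and MR_DEL are the names referenced in the comment above):

```c
/* Hypothetical sketch: first revoke write access on a 2-MByte guest
 * range, then delete the mapping entirely. */
static void ept_protect_then_unmap(uint64_t *pml4_page,
				   const struct pgtable *ept_table,
				   uint64_t gpa)
{
	/* MR_MODIFY: clear EPT_WR, set nothing; read/execute are kept */
	pgtable_modify_or_del_map(pml4_page, gpa, 0x200000UL,
				  0UL, EPT_WR, ept_table, MR_MODIFY);

	/* MR_DEL: per the comment above, the affected entries fall back
	 * to the sanitized page */
	pgtable_modify_or_del_map(pml4_page, gpa, 0x200000UL,
				  0UL, 0UL, ept_table, MR_DEL);
}
```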
@@ -422,10 +507,46 @@ static void add_pdpte(const uint64_t *pml4e, uint64_t paddr_start, uint64_t vadd
}
}
/*
* action: MR_ADD
* add [vaddr_base, vaddr_base + size ) memory region page table mapping.
* @pre: the prot should set before call this function.
/**
* @brief Add new page table mappings.
*
* This function maps a virtual address range specified by [vaddr_base, vaddr_base + size) to a physical address range
* starting from 'paddr_base'.
*
* - If any subrange within [vaddr_base, vaddr_base + size) is already mapped, the corresponding mapping is left
* unchanged and the operation continues.
* - When a new 1GB or 2MB mapping is established, the callback function table->tweak_exe_right() is invoked to tweak
* the execution bit.
* - When a new page table referenced by a new PDPTE/PDE is created, all entries in the page table are initialized to
* point to the sanitized page by default.
* - Finally, the new mappings are established and initialized according to the specified address range and properties.
*
* @param[inout] pml4_page A pointer to the specified PML4 table hierarchy.
* @param[in] paddr_base The specified physical address determining the start of the physical memory region.
* It is the host physical address.
* @param[in] vaddr_base The specified input address determining the start of the input address space.
* For hypervisor's MMU, it is the host virtual address.
* For each VM's EPT, it is the guest physical address.
* @param[in] size The size of the specified input address space.
* @param[in] prot Bit positions representing the specified properties which need to be set.
* @param[in] table A pointer to the struct pgtable containing the information of the specified memory operations.
*
* @return None
*
* @pre pml4_page != NULL
* @pre Any subrange within [vaddr_base, vaddr_base + size) shall already be unmapped.
* @pre For x86 hypervisor mapping, the following condition shall be met.
* - prot & ~(PAGE_PRESENT| PAGE_RW | PAGE_USER | PAGE_PWT | PAGE_PCD | PAGE_ACCESSED | PAGE_DIRTY | PAGE_PSE |
* PAGE_GLOBAL | PAGE_PAT_LARGE | PAGE_NX) == 0
* @pre For VM EPT mapping, the following conditions shall be met.
* - prot & ~(EPT_RD | EPT_WR | EPT_EXE | EPT_MT_MASK | EPT_IGNORE_PAT) == 0
* - (prot & EPT_MT_MASK) == EPT_UNCACHED || (prot & EPT_MT_MASK) == EPT_WC || (prot & EPT_MT_MASK) == EPT_WT ||
* (prot & EPT_MT_MASK) == EPT_WP || (prot & EPT_MT_MASK) == EPT_WB
* @pre table != NULL
*
* @post N/A
*
* @remark N/A
*/
void pgtable_add_map(uint64_t *pml4_page, uint64_t paddr_base, uint64_t vaddr_base,
uint64_t size, uint64_t prot, const struct pgtable *table)
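A corresponding usage sketch for establishing a mapping (not part of this patch; the wrapper and the identity-mapped 1-GByte region are illustrative, and the precondition above requires the range to be unmapped beforehand):

```c
/* Hypothetical sketch: identity-map a 1-GByte host region into the
 * hypervisor's MMU page tables as present, writable, non-executable. */
static void map_hv_1g_region(uint64_t *pml4_page,
			     const struct pgtable *ppt_table,
			     uint64_t base_hpa)
{
	pgtable_add_map(pml4_page, base_hpa, base_hpa, 0x40000000UL,
			PAGE_PRESENT | PAGE_RW | PAGE_NX, ppt_table);
}
```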
@@ -455,6 +576,24 @@ void pgtable_add_map(uint64_t *pml4_page, uint64_t paddr_base, uint64_t vaddr_ba
}
}
/**
* @brief Create a new root page table.
*
* This function initializes and returns a new root page table. It is typically used during the setup of a new execution
* context, such as initializing a hypervisor PML4 table or creating a virtual machine. The root page table is essential
* for defining the virtual memory layout for the context.
*
* It creates a new root page table and every entry in the page table is initialized to point to the sanitized page.
* Finally, the function returns the root page table pointer.
*
* @param[in] table A pointer to the struct pgtable containing the information of the specified memory operations.
*
* @return A pointer to the newly created root page table.
*
* @pre table != NULL
*
* @post N/A
*/
void *pgtable_create_root(const struct pgtable *table)
{
uint64_t *page = (uint64_t *)alloc_page(table->pool);
@@ -462,6 +601,31 @@ void *pgtable_create_root(const struct pgtable *table)
return page;
}
/**
* @brief Create a root page table for Secure World.
*
* This function initializes a new root page table for Secure World. It is intended to be used during the initialization
* phase of Trusty, setting up isolated memory regions for secure execution. Secure world can access Normal World's
* memory, but Normal World cannot access Secure World's memory. The PML4T/PDPT for Secure World are separated from
* Normal World. PDT/PT are shared in both Secure World's EPT and Normal World's EPT. So this function copies the PDPTEs
* from the Normal World to the Secure World.
*
* - It creates a new root page table and every entry is initialized to point to the sanitized page by default.
* - The access right specified by prot_clr is cleared for Secure World PDPTEs.
* - Finally, the function returns the new root page table pointer.
*
* @param[in] table A pointer to the struct pgtable containing the information of the specified memory operations.
* @param[in] nworld_pml4_page A pointer to pml4 table hierarchy in Normal World.
* @param[in] prot_table_present Mask indicating the page referenced is present.
* @param[in] prot_clr Bit positions representing the specified properties which need to be cleared.
*
* @return A pointer to the newly created root page table for Secure World.
*
* @pre table != NULL
* @pre nworld_pml4_page != NULL
*
* @post N/A
*/
void *pgtable_create_trusty_root(const struct pgtable *table,
void *nworld_pml4_page, uint64_t prot_table_present, uint64_t prot_clr)
{
@@ -474,7 +638,7 @@ void *pgtable_create_trusty_root(const struct pgtable *table,
* Secure world can access Normal World's memory,
* but Normal World can not access Secure World's memory.
* The PML4/PDPT for Secure world are separated from
* Normal World.PD/PT are shared in both Secure world's EPT
* Normal World. PD/PT are shared in both Secure world's EPT
* and Normal World's EPT
*/
pml4_base = pgtable_create_root(table);
@@ -508,7 +672,39 @@ void *pgtable_create_trusty_root,
}
/**
* @pre (pml4_page != NULL) && (pg_size != NULL)
* @brief Look for the paging-structure entry that contains the mapping information for the specified input address.
*
* This function looks for the paging-structure entry that contains the mapping information for the specified input
* address of the translation process. It is used to search the page table hierarchy for the entry corresponding to the
* given virtual address. The function traverses the page table hierarchy from the PML4 down to the appropriate page
* table level, returning the entry if found.
*
* - If the specified address is mapped in the page table hierarchy, it will return a pointer to the page table entry that
* maps the specified address.
* - If the specified address is not mapped in the page table hierarchy, it will return NULL.
*
* @param[in] pml4_page A pointer to the specified PML4 table hierarchy.
* @param[in] addr The specified input address whose mapping information is to be searched.
* For hypervisor's MMU, it is the host virtual address.
* For each VM's EPT, it is the guest physical address.
* @param[out] pg_size A pointer to the size of the page controlled by the returned paging-structure entry.
* @param[in] table A pointer to the struct pgtable containing the information of the specified memory operations.
*
* @return A pointer to the paging-structure entry that maps the specified input address.
*
* @retval non-NULL There is a paging-structure entry that contains the mapping information for the specified input
* address.
* @retval NULL There is no paging-structure entry that contains the mapping information for the specified input
* address.
*
* @pre pml4_page != NULL
* @pre pg_size != NULL
* @pre table != NULL
*
* @post N/A
*
* @remark N/A
*/
const uint64_t *pgtable_lookup_entry(uint64_t *pml4_page, uint64_t addr, uint64_t *pg_size, const struct pgtable *table)
{
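A lookup usage sketch tying the retval contract above to the pg_size output (not part of this patch; the wrapper is hypothetical):

```c
/* Hypothetical sketch: probe whether an address is mapped and at what
 * granularity. On a hit, pg_size receives the size of the page the
 * returned entry controls (4-KByte, 2-MByte, or 1-GByte). */
static bool addr_is_mapped(uint64_t *pml4_page, uint64_t addr,
			   const struct pgtable *table)
{
	uint64_t pg_size = 0UL;
	const uint64_t *entry;

	entry = pgtable_lookup_entry(pml4_page, addr, &pg_size, table);
	return (entry != NULL);
}
```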
@@ -548,3 +744,7 @@ const uint64_t *pgtable_lookup_entry(uint64_t *pml4_page, uint64_t addr, uint64_
return pret;
}
/**
* @}
*/

View File

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2018-2022 Intel Corporation.
* Copyright (C) 2018-2024 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,6 +10,28 @@
#include <asm/lib/spinlock.h>
#include <board_info.h>
/**
* @defgroup hwmgmt_page hwmgmt.page
* @ingroup hwmgmt
* @brief Support the basic paging mechanism.
*
* This module mainly provides the interfaces to manipulate the paging structures.
* These operations are commonly used by:
* 1. hypervisor's MMU (Memory Management Unit) to manage the host page tables;
* 2. EPT to manage the extended page tables for guests.
* It also provides the interfaces to conduct the address translation between Host Physical Address and Host Virtual
* Address.
*
* @{
*/
/**
* @file
* @brief All APIs to support page management.
*
* This file defines macros, structures and function declarations for managing memory pages.
*/
#define PAGE_SHIFT 12U
#define PAGE_SIZE (1U << PAGE_SHIFT)
#define PAGE_MASK 0xFFFFFFFFFFFFF000UL
@@ -22,26 +44,96 @@
/* size of the high MMIO address space: 1GB */
#define PLATFORM_HI_MMIO_SIZE 0x40000000UL
/**
* @brief Calculate the number of page map level-4 (PML4) tables required to control the memory region with the
* specified size.
*
* The page map level-4 (PML4) table is the top-level table in the x86-64 paging hierarchy. Each entry in the PML4 table
* can map a 512-GByte region, and the entire PML4 table can address up to 256 TiB, so one PML4 table is enough to
* control the entire physical address space.
*/
#define PML4_PAGE_NUM(size) 1UL
/**
* @brief Calculate the number of page directory pointer tables (PDPT) required to control the memory region with the
* specified size.
*
* A page directory pointer table (PDPT) is referenced by a PML4E, and each PML4E controls access to a 512-GByte
* region. This macro is intended to be used when the hypervisor allocates the page directory pointer tables for the
* hypervisor and all VMs.
*/
#define PDPT_PAGE_NUM(size) (((size) + PML4E_SIZE - 1UL) >> PML4E_SHIFT)
/**
* @brief Calculate the number of page directories (PD) required to control the memory region with the specified
* size.
*
* A page directory (PD) is referenced by a PDPTE, and each PDPTE controls access to a 1-GByte region. This macro is
* intended to be used when the hypervisor allocates the page directories for the hypervisor and all VMs.
*/
#define PD_PAGE_NUM(size) (((size) + PDPTE_SIZE - 1UL) >> PDPTE_SHIFT)
/**
* @brief Calculate the number of page tables (PT) required to control the memory region with the specified
* size.
*
* A page table (PT) is referenced by a PDE, and each PDE controls access to a 2-MByte region. This macro is intended
* to be used when the hypervisor allocates the page tables for the hypervisor and all VMs.
*/
#define PT_PAGE_NUM(size) (((size) + PDE_SIZE - 1UL) >> PDE_SHIFT)
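A worked example of the four macros (values follow from the shift/size constants they use; the 16-GByte figure is illustrative):

```c
/* For a 16-GByte region:
 *   PML4_PAGE_NUM(16 GiB) == 1     (one PML4 covers 256 TiB)
 *   PDPT_PAGE_NUM(16 GiB) == 1     (16 GiB fits in one 512-GByte PML4E)
 *   PD_PAGE_NUM(16 GiB)   == 16    (one PD per 1-GByte PDPTE)
 *   PT_PAGE_NUM(16 GiB)   == 8192  (one PT per 2-MByte PDE)
 */
#define EXAMPLE_REGION_SIZE	(16UL << 30U)	/* 16 GiB, illustrative */
```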
/**
* @brief Data structure that represents a 4-KByte memory region with an alignment of 4-KByte.
*
* This data structure represents a 4-KByte memory region with an alignment of 4-KByte, referred to as a 4-KByte page.
* It can be used to support the memory management in the hypervisor and the extended page-table mechanism for VMs. It
* can also be used when the hypervisor accesses a 4-KByte-aligned memory region whose size is a multiple of 4-KByte.
*
* @consistency N/A
* @alignment 4096
*
* @remark N/A
*/
struct page {
uint8_t contents[PAGE_SIZE];
uint8_t contents[PAGE_SIZE]; /**< A 4-KByte page in the memory. */
} __aligned(PAGE_SIZE);
/**
* @brief Data structure that contains a pool of memory pages.
*
* This structure is designed to manage a collection of memory pages, facilitating efficient allocation, deallocation,
* and reuse of pages. The page pool minimizes the overhead of frequent page allocations by maintaining a ready-to-use
* pool of pages. It is used to support the memory management in the hypervisor and the extended page-table mechanism
* for VMs.
*
* @consistency N/A
* @alignment N/A
*
* @remark N/A
*/
struct page_pool {
struct page *start_page;
spinlock_t lock;
uint64_t bitmap_size;
uint64_t *bitmap;
uint64_t last_hint_id;
struct page *dummy_page;
struct page *start_page; /**< The first page in the pool. */
spinlock_t lock; /**< The spinlock to protect simultaneous access of the page pool. */
/**
* @brief A pointer to the bitmap that represents the allocation status of each page in the pool.
*
* Each bit in the bitmap corresponds to a page in the pool. If the bit is set to 1, the page is allocated;
* otherwise, the page is free.
*/
uint64_t *bitmap;
uint64_t bitmap_size; /**< The number of 64-bit words in the bitmap. */
uint64_t last_hint_id; /**< The bitmap word index last used to allocate a page, kept as a search hint. */
/**
* @brief A pointer to the dummy page.
*
* This page is used when there is no page available in the pool.
*/
struct page *dummy_page;
};
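A hedged sizing sketch for the structure above (not part of this patch; the names and the 1024-page figure are illustrative — each 64-bit bitmap word tracks 64 pages, so bitmap_size counts words):

```c
#define EXAMPLE_POOL_PAGES	1024UL	/* illustrative pool size */

static struct page example_pages[EXAMPLE_POOL_PAGES];
static uint64_t example_bitmap[EXAMPLE_POOL_PAGES / 64UL];
static struct page example_dummy_page;

static struct page_pool example_pool = {
	.start_page   = example_pages,
	.bitmap       = example_bitmap,
	.bitmap_size  = EXAMPLE_POOL_PAGES / 64UL,	/* 16 words */
	.last_hint_id = 0UL,
	.dummy_page   = &example_dummy_page,
	/* .lock is initialized at boot, e.g. via spinlock_init() */
};
```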
struct page *alloc_page(struct page_pool *pool);
void free_page(struct page_pool *pool, struct page *page);
#endif /* PAGE_H */
/**
* @}
*/

View File

@@ -1,18 +1,27 @@
/*
* Copyright (C) 2018-2022 Intel Corporation.
* Copyright (C) 2018-2024 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**
* @file pgtable.h
*
* @brief Address translation and page table operations
*/
#ifndef PGTABLE_H
#define PGTABLE_H
#include <asm/page.h>
/**
* @addtogroup hwmgmt_page
*
* @{
*/
/**
* @file
* @brief All APIs to support page table management.
*
* This file defines the macros, structures, declarations, and functions related to managing page tables.
*
*/
#define PAGE_PRESENT (1UL << 0U)
#define PAGE_RW (1UL << 1U)
#define PAGE_USER (1UL << 2U)
@@ -148,65 +157,129 @@
/**
* @brief Page table levels in IA-32e paging mode
*
* 4-level paging in IA-32e mode may map linear addresses to 4-KByte pages, 2-MByte pages, or 1-GByte pages. The 4
* levels are PML4, PDPT, PD, and PT. The value representing each level is fixed.
*/
enum _page_table_level {
/**
* @brief The PML4 level in the page tables
*/
IA32E_PML4 = 0,
/**
* @brief The Page-Directory-Pointer-Table level in the page tables
*/
IA32E_PDPT = 1,
/**
* @brief The Page-Directory level in the page tables
*/
IA32E_PD = 2,
/**
* @brief The Page-Table level in the page tables
*/
IA32E_PT = 3,
IA32E_PML4 = 0, /**< The Page-Map-Level-4(PML4) level in the page tables.
* The value is fixed to 0. */
IA32E_PDPT = 1, /**< The Page-Directory-Pointer-Table(PDPT) level in the page tables. */
IA32E_PD = 2, /**< The Page-Directory(PD) level in the page tables. */
IA32E_PT = 3, /**< The Page-Table(PT) level in the page tables. */
};
/**
* @brief Data structure that contains the related operations and properties of a page table.
*
* This structure is used to add, modify, or delete page table mappings.
*
* @consistency N/A
* @alignment N/A
*
* @remark N/A
*/
struct pgtable {
/**
* @brief Default memory access rights.
*
* A linear address can be translated to a physical address by the page tables. The translation is controlled by
* the memory access rights, as defined by the architecture's memory system design. The default memory access
* rights can be used to set the memory access rights for a page table entry when the page table is created.
*/
uint64_t default_access_right;
/**
* @brief Mask to check if the page referenced by entry is present.
*
* The presence of a page is indicated by specific bits in the entry, as defined by the architecture's memory
* system design. For example, in an EPT entry it is indicated by bits 0-2 (read/write/execute), and in an MMU entry
* it is indicated by bit 0 (present).
*/
uint64_t pgentry_present_mask;
struct page_pool *pool;
struct page_pool *pool; /**< Pointer to the page pool used for managing pages. */
/**
* @brief Function to check if large pages are supported.
*
* This function is used to check if large pages are supported for a specific page table level and memory access
* rights.
*/
bool (*large_page_support)(enum _page_table_level level, uint64_t prot);
void (*clflush_pagewalk)(const void *p);
void (*tweak_exe_right)(uint64_t *entry);
void (*recover_exe_right)(uint64_t *entry);
void (*clflush_pagewalk)(const void *p); /**< Function to flush a page table entry from the cache. */
void (*tweak_exe_right)(uint64_t *entry); /**< Function to tweak execution rights for an entry. */
void (*recover_exe_right)(uint64_t *entry); /**< Function to recover execution rights for an entry. */
};
/**
* @brief Check whether the page referenced by the specified paging-structure entry is present or not.
*
* A paging-structure entry references a page. The presence of that page is indicated by specific bits in the entry,
* as defined by the architecture's memory system design. For example, in an EPT entry it is indicated by bits 0-2
* (read/write/execute), and in an MMU entry it is indicated by bit 0 (present).
*
* This function checks whether the referenced page exists based on those bits.
*
* @param[in] table A pointer to the structure pgtable which provides the mask to check whether page referenced is
* present or not.
* @param[in] pte The paging-structure entry to check.
*
* @return A boolean value indicating if the page referenced by the specified paging-structure entry is present.
*
* @retval true Indicates the page referenced is present.
* @retval false Indicates the page referenced is not present.
*
* @pre table != NULL
*
* @post N/A
*/
static inline bool pgentry_present(const struct pgtable *table, uint64_t pte)
{
return ((table->pgentry_present_mask & (pte)) != 0UL);
}
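To make the two conventions concrete, a hedged sketch of the corresponding mask configuration (the initializers are illustrative and other fields are omitted; PAGE_PRESENT and EPT_RD/EPT_WR/EPT_EXE are ACRN's flag names):

```c
/* Illustrative pgtable configurations showing the two present-mask
 * conventions described above. */
static const struct pgtable mmu_pgtable_example = {
	.pgentry_present_mask = PAGE_PRESENT,			/* bit 0 */
};

static const struct pgtable ept_pgtable_example = {
	.pgentry_present_mask = EPT_RD | EPT_WR | EPT_EXE,	/* bits 0-2 */
};
```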
/**
* @brief Address space translation
* @brief Translate a host physical address to a host virtual address before paging mode is enabled.
*
* @addtogroup acrn_mem ACRN Memory Management
* @{
*/
/* hpa <--> hva, now it is 1:1 mapping */
/**
* @brief Translate host-physical address to host-virtual address
* This function is used to translate a host physical address to a host virtual address before paging mode is enabled.
* HPA is mapped 1:1 to HVA.
*
* @param[in] x The specified host-physical address
* It returns the host virtual address that corresponds to the given host physical address.
*
* @return The translated host-virtual address
* @param[in] x The host physical address
*
* @return The translated host virtual address
*
* @retval NULL if x == 0
*
* @pre N/A
*
* @post N/A
*
* @remark This function is used before paging mode is enabled.
*/
static inline void *hpa2hva_early(uint64_t x)
{
return (void *)x;
}
/**
* @brief Translate host-virtual address to host-physical address
* @brief Translate a host virtual address to a host physical address before paging mode is enabled.
*
* @param[in] x The specified host-virtual address
* This function is used to translate a host virtual address to a host physical address before paging mode is enabled.
* HVA is mapped 1:1 to HPA.
*
* @return The translated host-physical address
* It returns the host physical address that corresponds to the given host virtual address.
*
* @param[in] x The host virtual address to be translated
*
* @return The translated host physical address
*
* @retval 0 if x == NULL
*
* @pre N/A
*
* @post N/A
*
* @remark This function is used before paging mode is enabled.
*/
static inline uint64_t hva2hpa_early(void *x)
{
@@ -214,22 +287,47 @@ static inline uint64_t hva2hpa_early(void *x)
}
/**
* @brief Translate host-physical address to host-virtual address
* @brief Translate a host physical address to a host virtual address.
*
* @param[in] x The specified host-physical address
* This function is used to translate a host physical address to a host virtual address. HPA is mapped 1:1 to HVA.
*
* @return The translated host-virtual address
* It returns the host virtual address that corresponds to the given host physical address.
*
* @param[in] x The host physical address to be translated.
*
* @return The translated host virtual address
*
* @retval NULL if x == 0
*
* @pre N/A
*
* @post N/A
*
* @remark This function is used after paging mode is enabled.
*/
static inline void *hpa2hva(uint64_t x)
{
return (void *)x;
}
/**
* @brief Translate host-virtual address to host-physical address
* @brief Translate a host virtual address to a host physical address.
*
* @param[in] x The specified host-virtual address
* This function is used to translate a host virtual address to a host physical address. HVA is mapped 1:1 to HPA.
*
* @return The translated host-physical address
* It returns the host physical address that corresponds to the given host virtual address.
*
* @param[in] x The host virtual address to be translated.
*
* @return The translated host physical address.
*
* @retval 0 if x == NULL
*
* @pre N/A
*
* @post N/A
*
* @remark This function is used after paging mode is enabled.
*/
static inline uint64_t hva2hpa(const void *x)
{
@@ -271,21 +369,101 @@ static inline uint64_t *pde_page_vaddr(uint64_t pde)
return hpa2hva(pde & PDE_PFN_MASK);
}
/**
* @brief Calculate the page map level-4 table entry(PML4E) for a specified input address.
*
* The page map level-4 table (PML4T) contains 512 entries, each of which points to a page directory pointer table
* (PDPT). The input address carries the index of the PML4E within the PML4T. This function calculates the address of
* that PML4E and is typically used during the page translation process.
*
* It will return a pointer to the page map level-4 table entry(PML4E).
*
* @param[in] pml4_page A pointer to a page map level-4 table(PML4T) page.
* @param[in] addr The address value for which the page map level-4 table entry(PML4E) address is to be calculated.
* For hypervisor's MMU, it is the host virtual address.
* For each VM's EPT, it is the guest physical address.
*
* @return A pointer to the PML4E.
*
* @pre pml4_page != NULL
*
* @post N/A
*/
static inline uint64_t *pml4e_offset(uint64_t *pml4_page, uint64_t addr)
{
return pml4_page + pml4e_index(addr);
}
/**
* @brief Calculate the page directory pointer table entry(PDPTE) for a specified input address.
*
* The page directory pointer table (PDPT) is referenced by a page map level-4 table entry (PML4E), and each entry
* (PDPTE) in the PDPT points to a page directory table (PDT). The input address carries the index of the PDPTE within
* the PDPT. This function calculates the address of that PDPTE and is typically used during the page translation
* process.
*
* It will return a pointer to the page directory pointer table entry(PDPTE).
*
* @param[in] pml4e A pointer to a page map level-4 table entry(PML4E).
* @param[in] addr The address for which the page directory pointer table entry(PDPTE) address is to be calculated.
* For hypervisor's MMU, it is the host virtual address.
* For each VM's EPT, it is the guest physical address.
*
* @return A pointer to the PDPTE.
*
* @pre pml4e != NULL
*
* @post N/A
*/
static inline uint64_t *pdpte_offset(const uint64_t *pml4e, uint64_t addr)
{
return pml4e_page_vaddr(*pml4e) + pdpte_index(addr);
}
/**
* @brief Calculate the page directory table entry(PDE) for a specified input address.
*
* The page directory table (PDT) is referenced by a page directory pointer table entry (PDPTE), and each entry (PDE)
* in the PDT points to a page table (PT). The input address carries the index of the PDE within the PDT. This function
* calculates the address of that PDE and is typically used during the page translation process.
*
* It will return a pointer to the page directory table entry(PDE).
*
* @param[in] pdpte A pointer to a page directory pointer table entry(PDPTE).
* @param[in] addr The address for which the page directory table entry(PDE) address is to be calculated.
* For hypervisor's MMU, it is the host virtual address.
* For each VM's EPT, it is the guest physical address.
*
* @return A pointer to the PDE.
*
* @pre pdpte != NULL
*
* @post N/A
*/
static inline uint64_t *pde_offset(const uint64_t *pdpte, uint64_t addr)
{
return pdpte_page_vaddr(*pdpte) + pde_index(addr);
}
/**
* @brief Calculate the page table entry(PTE) for a specified input address.
*
* The page table entry (PTE) is the entry that maps a 4-KByte page. The input address carries the index of the PTE
* within the page table. This function calculates the address of that PTE and is typically used during the page
* translation process.
*
* It will return the address of a page table entry(PTE).
*
* @param[in] pde A pointer to a page directory entry(PDE).
* @param[in] addr The address for which the page table entry(PTE) address is to be calculated.
* For hypervisor's MMU, it is the host virtual address.
* For each VM's EPT, it is the guest physical address.
*
* @return A pointer to the page table entry(PTE).
*
* @pre pde != NULL
*
* @post N/A
*/
static inline uint64_t *pte_offset(const uint64_t *pde, uint64_t addr)
{
return pde_page_vaddr(*pde) + pte_index(addr);
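The four offset helpers above chain naturally into a manual walk; a hedged sketch (not part of this patch — unlike pgtable_lookup_entry, it performs no present or large-page checks):

```c
/* Hypothetical sketch: walk all four levels for an address known to be
 * mapped with 4-KByte pages. pgtable_lookup_entry() handles non-present
 * entries and large pages; this sketch does not. */
static inline uint64_t *walk_to_pte(uint64_t *pml4_page, uint64_t addr)
{
	uint64_t *pml4e = pml4e_offset(pml4_page, addr);
	uint64_t *pdpte = pdpte_offset(pml4e, addr);
	uint64_t *pde = pde_offset(pdpte, addr);

	return pte_offset(pde, addr);
}
```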
@@ -308,11 +486,51 @@ static inline void set_pgentry(uint64_t *ptep, uint64_t pte, const struct pgtabl
table->clflush_pagewalk(ptep);
}
/**
* @brief Check whether the PS flag of the specified page directory table entry(PDE) is 1 or not.
*
* The PS (Page Size) flag in a PDE indicates whether the entry maps a 2-MByte page or references a page table. This
* function checks that flag. It is typically used when setting up or modifying page tables, where it is necessary to
* distinguish between large and regular page mappings.
*
* It returns a value with bit 7 set (PAGE_PSE) if the specified PDE maps a 2-MByte page, or 0 if it references a page
* table.
*
* @param[in] pde The page directory table entry(PDE) to check.
*
* @return The value of PS flag in the PDE.
*
* @retval PAGE_PSE indicating mapping to a 2-MByte page.
* @retval 0 indicating reference to a page table.
*
* @pre N/A
*
* @post N/A
*/
static inline uint64_t pde_large(uint64_t pde)
{
return pde & PAGE_PSE;
}
/**
* @brief Check whether the PS flag of the specified page directory pointer table entry(PDPTE) is 1 or not.
*
* The PS (Page Size) flag in a PDPTE indicates whether the entry maps a 1-GByte page or references a page directory
* table. This function checks that flag. It is typically used when setting up or modifying page tables, where it is
* necessary to distinguish between large and regular page mappings.
*
* It returns a value with bit 7 set (PAGE_PSE) if the specified PDPTE maps a 1-GByte page, or 0 if it references a
* page directory table.
*
* @param[in] pdpte The page directory pointer table entry(PDPTE) to check.
*
* @return The value of PS flag in the PDPTE.
*
* @retval PAGE_PSE indicating mapping to a 1-GByte page.
* @retval 0 indicating reference to a page directory table.
*
* @pre N/A
*
* @post N/A
*/
static inline uint64_t pdpte_large(uint64_t pdpte)
{
return pdpte & PAGE_PSE;
@@ -335,7 +553,8 @@ void pgtable_add_map(uint64_t *pml4_page, uint64_t paddr_base,
void pgtable_modify_or_del_map(uint64_t *pml4_page, uint64_t vaddr_base,
uint64_t size, uint64_t prot_set, uint64_t prot_clr,
const struct pgtable *table, uint32_t type);
#endif /* PGTABLE_H */
/**
* @}
*/
#endif /* PGTABLE_H */