hv:multiarch: move main memory structure to common

Move struct pgtable and struct page_pool to common code, and move
alloc_page/free_page/init_page_pool to common as well.

Tracked-On: #8831
Signed-off-by: hangliu1 <hang1.liu@intel.com>
Reviewed-by: Liu, Yifan1 <yifan1.liu@intel.com>
Acked-by: Wang, Yu1 <yu1.wang@intel.com>
Author:    hangliu1
Date:      2025-10-13 10:29:59 +08:00
Committer: acrnsi-robot
Parent:    2ab068400b
Commit:    2e4f5e79b7

13 changed files with 132 additions and 170 deletions
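At its core, the change replaces two architecture-specific data fields in struct pgtable (a presence mask and a default-access-rights constant) with callbacks that each page-table flavor supplies, so the generic walkers no longer embed EPT or MMU bit layouts. A minimal, self-contained sketch of that ops-table pattern (simplified stand-in types and values, not the actual ACRN declarations):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the struct pgtable moved to common mmu.h. */
    struct pgtable {
        uint64_t (*get_default_access_right)(void);
        uint64_t (*pgentry_present)(uint64_t pte);
    };

    /* EPT flavor: an entry is present if any of the R/W/X bits (0..2) is set. */
    static uint64_t ept_pgentry_present(uint64_t pte) { return (pte & 0x7UL) != 0UL; }
    static uint64_t ept_default_access_right(void) { return 0x7UL; }

    /* Host page-table (PPT) flavor: presence is bit 0 only. */
    static uint64_t ppt_pgentry_present(uint64_t pte) { return (pte & 0x1UL) != 0UL; }
    static uint64_t ppt_default_access_right(void) { return 0x7UL; } /* P|RW|USER */

    /* Common code no longer needs a per-arch mask; it just calls back. */
    static int entry_is_present(const struct pgtable *t, uint64_t pte)
    {
        return t->pgentry_present(pte) != 0UL;
    }

    int main(void)
    {
        struct pgtable ept = { ept_default_access_right, ept_pgentry_present };
        struct pgtable ppt = { ppt_default_access_right, ppt_pgentry_present };
        /* 0x4 is EPT-executable but not MMU-present: prints "1 0" */
        printf("%d %d\n", entry_is_present(&ept, 0x4UL), entry_is_present(&ppt, 0x4UL));
        return 0;
    }

This is the dependency the diffs below remove: constants such as EPT_RWX stay behind arch callbacks, while the common pagetable code only ever calls table->pgentry_present() and table->get_default_access_right().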


@@ -161,6 +161,7 @@ COMMON_C_SRCS += common/timer.c
COMMON_C_SRCS += common/softirq.c
COMMON_C_SRCS += common/trace.c
COMMON_C_SRCS += common/schedule.c
+ COMMON_C_SRCS += common/mmu.c
ifeq ($(CONFIG_SCHED_NOOP),y)
COMMON_C_SRCS += common/sched_noop.c
endif


@@ -69,7 +69,6 @@ HW_C_SRCS += arch/x86/security.c
HW_C_SRCS += arch/x86/mmu.c
HW_C_SRCS += arch/x86/e820.c
HW_C_SRCS += arch/x86/pagetable.c
- HW_C_SRCS += arch/x86/page.c
HW_C_SRCS += arch/x86/notify.c
HW_C_SRCS += arch/x86/vtd.c
HW_C_SRCS += arch/x86/gdt.c


@@ -144,7 +144,7 @@ void init_pcpu_pre(bool is_bsp)
init_e820();
/* reserve ppt buffer from e820 */
- allocate_ppt_pages();
+ ppt_page_pool_init();
/* Initialize the hypervisor paging */
init_paging();


@@ -146,7 +146,7 @@ static inline bool use_large_page(enum _page_table_level level, uint64_t prot)
return ret;
}
- static inline void ept_clflush_pagewalk(const void* etry)
+ static inline void ept_flush_cache_pagewalk(const void* etry)
{
iommu_flush_cache(etry, sizeof(uint64_t));
}
@@ -168,6 +168,17 @@ static inline void ept_recover_exe_right(uint64_t *entry)
*entry |= EPT_EXE;
}
+ static inline uint64_t ept_default_access_right(void)
+ {
+ return EPT_RWX;
+ }
+ static inline uint64_t ept_pgentry_present(uint64_t pte)
+ {
+ return ((EPT_RWX & (pte)) != 0UL);
+ }
void init_ept_pgtable(struct pgtable *table, uint16_t vm_id)
{
struct acrn_vm *vm = get_vm_from_vmid(vm_id);
@@ -182,9 +193,9 @@ void init_ept_pgtable(struct pgtable *table, uint16_t vm_id)
ept_page_pool[vm_id].last_hint_id = 0UL;
table->pool = &ept_page_pool[vm_id];
- table->default_access_right = EPT_RWX;
- table->pgentry_present_mask = EPT_RWX;
- table->clflush_pagewalk = ept_clflush_pagewalk;
+ table->get_default_access_right = ept_default_access_right;
+ table->pgentry_present = ept_pgentry_present;
+ table->flush_cache_pagewalk = ept_flush_cache_pagewalk;
table->large_page_support = ept_large_page_support;
/* Mitigation for issue "Machine Check Error on Page Size Change" */
@@ -434,12 +445,12 @@ void walk_ept_table(struct acrn_vm *vm, pge_handler cb)
for (i = 0UL; i < PTRS_PER_PML4E; i++) {
pml4e = pml4e_offset((uint64_t *)get_eptp(vm), i << PML4E_SHIFT);
- if (!pgentry_present(table, (*pml4e))) {
+ if (!table->pgentry_present(*pml4e)) {
continue;
}
for (j = 0UL; j < PTRS_PER_PDPTE; j++) {
pdpte = pdpte_offset(pml4e, j << PDPTE_SHIFT);
- if (!pgentry_present(table, (*pdpte))) {
+ if (!table->pgentry_present(*pdpte)) {
continue;
}
if (pdpte_large(*pdpte) != 0UL) {
@@ -448,7 +459,7 @@ void walk_ept_table(struct acrn_vm *vm, pge_handler cb)
}
for (k = 0UL; k < PTRS_PER_PDE; k++) {
pde = pde_offset(pdpte, k << PDE_SHIFT);
- if (!pgentry_present(table, (*pde))) {
+ if (!table->pgentry_present(*pde)) {
continue;
}
if (pde_large(*pde) != 0UL) {
@@ -457,7 +468,7 @@ void walk_ept_table(struct acrn_vm *vm, pge_handler cb)
}
for (m = 0UL; m < PTRS_PER_PTE; m++) {
pte = pte_offset(pde, m << PTE_SHIFT);
- if (pgentry_present(table, (*pte))) {
+ if (table->pgentry_present(*pte)) {
cb(pte, PTE_SIZE);
}
}
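walk_ept_table visits every present leaf entry by nesting over the four paging levels, and the shifts used with pml4e_offset/pdpte_offset/pde_offset/pte_offset correspond to the standard x86-64 9-9-9-9-12 address split. For reference, how a guest-physical address decomposes into the four table indices (an illustrative snippet, not ACRN code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t gpa = 0x12345678000UL;
        /* 512 entries per level -> 9 index bits; 4-KiB pages -> 12 offset bits.
         * The shifts 39/30/21/12 match PML4E/PDPTE/PDE/PTE_SHIFT in the diff. */
        printf("pml4e=%llu pdpte=%llu pde=%llu pte=%llu\n",
               (unsigned long long)((gpa >> 39) & 0x1FFUL),
               (unsigned long long)((gpa >> 30) & 0x1FFUL),
               (unsigned long long)((gpa >> 21) & 0x1FFUL),
               (unsigned long long)((gpa >> 12) & 0x1FFUL));
        return 0;
    }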


@@ -31,9 +31,9 @@
#include <asm/cpufeatures.h>
#include <asm/pgtable.h>
#include <asm/cpu_caps.h>
- #include <asm/mmu.h>
#include <asm/vmx.h>
#include <reloc.h>
+ #include <mmu.h>
#include <asm/guest/vm.h>
#include <asm/boot/ld_sym.h>
#include <logmsg.h>
@@ -80,20 +80,29 @@ static inline bool ppt_large_page_support(enum _page_table_level level, __unused
return support;
}
- static inline void ppt_clflush_pagewalk(const void* entry __attribute__((unused)))
+ static inline void ppt_flush_cache_pagewalk(const void* entry __attribute__((unused)))
{
}
+ static inline uint64_t ppt_pgentry_present(uint64_t pte)
+ {
+ return ((PAGE_PRESENT & (pte)) != 0UL);
+ }
+ static inline uint64_t ppt_default_access_right(void)
+ {
+ return (PAGE_PRESENT | PAGE_RW | PAGE_USER);
+ }
static inline void ppt_nop_tweak_exe_right(uint64_t *entry __attribute__((unused))) {}
static inline void ppt_nop_recover_exe_right(uint64_t *entry __attribute__((unused))) {}
static const struct pgtable ppt_pgtable = {
- .default_access_right = (PAGE_PRESENT | PAGE_RW | PAGE_USER),
- .pgentry_present_mask = PAGE_PRESENT,
.pool = &ppt_page_pool,
+ .get_default_access_right = ppt_default_access_right,
+ .pgentry_present = ppt_pgentry_present,
.large_page_support = ppt_large_page_support,
- .clflush_pagewalk = ppt_clflush_pagewalk,
+ .flush_cache_pagewalk = ppt_flush_cache_pagewalk,
.tweak_exe_right = ppt_nop_tweak_exe_right,
.recover_exe_right = ppt_nop_recover_exe_right,
};
@@ -230,19 +239,13 @@ void set_paging_x(uint64_t base, uint64_t size)
base_aligned, size_aligned, 0UL, PAGE_NX, &ppt_pgtable, MR_MODIFY);
}
- void allocate_ppt_pages(void)
+ void ppt_page_pool_init(void)
{
- uint64_t page_base;
- uint64_t bitmap_size = get_ppt_page_num() / 8;
+ uint64_t pg_num = get_ppt_page_num();
+ uint64_t page_base, bitmap_base;
page_base = e820_alloc_memory(sizeof(struct page) * get_ppt_page_num(), MEM_4G);
- ppt_page_pool.bitmap = (uint64_t *)e820_alloc_memory(bitmap_size, MEM_4G);
- ppt_page_pool.start_page = (struct page *)(void *)page_base;
- ppt_page_pool.bitmap_size = bitmap_size / sizeof(uint64_t);
- ppt_page_pool.dummy_page = NULL;
- memset(ppt_page_pool.bitmap, 0, bitmap_size);
+ bitmap_base = e820_alloc_memory(sizeof(struct page), MEM_4G);
+ init_page_pool(&ppt_page_pool, (uint64_t *)page_base, (uint64_t *)bitmap_base, pg_num);
}
void init_paging(void)
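A quick sizing note on ppt_page_pool_init as rewritten: the page array costs sizeof(struct page) (4 KiB) per pool page, while the bitmap costs one bit per page; the sizeof(struct page) bytes reserved for the bitmap above therefore cover pools of up to 4096 * 8 = 32768 pages. Illustrative arithmetic (hypothetical pool size, not a value from the commit):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t pg_num = 16384UL;               /* hypothetical pool size */
        uint64_t page_bytes = pg_num * 4096UL;   /* sizeof(struct page) == 4 KiB */
        uint64_t bitmap_bytes = pg_num / 8UL;    /* one bit per page */
        printf("pages: %llu MiB, bitmap: %llu KiB (a 4-KiB bitmap covers %llu pages)\n",
               (unsigned long long)(page_bytes >> 20),
               (unsigned long long)(bitmap_bytes >> 10),
               4096ULL * 8ULL);
        return 0;
    }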


@@ -88,7 +88,7 @@ static void try_to_free_pgtable_page(const struct pgtable *table,
for (index = 0UL; index < PTRS_PER_PTE; index++) {
uint64_t *pte = pt_page + index;
- if (pgentry_present(table, (*pte))) {
+ if (table->pgentry_present(*pte)) {
break;
}
}
@@ -136,7 +136,7 @@ static void split_large_page(uint64_t *pte, enum _page_table_level level,
paddr += paddrinc;
}
- ref_prot = table->default_access_right;
+ ref_prot = table->get_default_access_right();
set_pgentry(pte, hva2hpa((void *)pbase) | ref_prot, table);
/* TODO: flush the TLB */
@@ -183,7 +183,7 @@ static void modify_or_del_pte(uint64_t *pde, uint64_t vaddr_start, uint64_t vadd
for (; index < PTRS_PER_PTE; index++) {
uint64_t *pte = pt_page + index;
- if (!pgentry_present(table, (*pte))) {
+ if (!table->pgentry_present(*pte)) {
/*suppress warning message for low memory (< 1MBytes),as service VM
* will update MTTR attributes for this region by default whether it
* is present or not.
@@ -223,7 +223,7 @@ static void modify_or_del_pde(uint64_t *pdpte, uint64_t vaddr_start, uint64_t va
uint64_t *pde = pd_page + index;
uint64_t vaddr_next = (vaddr & PDE_MASK) + PDE_SIZE;
- if (!pgentry_present(table, (*pde))) {
+ if (!table->pgentry_present(*pde)) {
if (type == MR_MODIFY) {
pr_warn("%s, addr: 0x%lx pde is not present.\n", __func__, vaddr);
}
@@ -270,7 +270,7 @@ static void modify_or_del_pdpte(const uint64_t *pml4e, uint64_t vaddr_start, uin
uint64_t *pdpte = pdpt_page + index;
uint64_t vaddr_next = (vaddr & PDPTE_MASK) + PDPTE_SIZE;
- if (!pgentry_present(table, (*pdpte))) {
+ if (!table->pgentry_present(*pdpte)) {
if (type == MR_MODIFY) {
pr_warn("%s, vaddr: 0x%lx pdpte is not present.\n", __func__, vaddr);
}
@@ -368,7 +368,7 @@ void pgtable_modify_or_del_map(uint64_t *pml4_page, uint64_t vaddr_base, uint64_
while (vaddr < vaddr_end) {
vaddr_next = (vaddr & PML4E_MASK) + PML4E_SIZE;
pml4e = pml4e_offset(pml4_page, vaddr);
- if ((!pgentry_present(table, (*pml4e))) && (type == MR_MODIFY)) {
+ if ((!table->pgentry_present(*pml4e)) && (type == MR_MODIFY)) {
ASSERT(false, "invalid op, pml4e not present");
} else {
modify_or_del_pdpte(pml4e, vaddr, vaddr_end, prot_set, prot_clr, table, type);
@@ -394,7 +394,7 @@ static void add_pte(const uint64_t *pde, uint64_t paddr_start, uint64_t vaddr_st
for (; index < PTRS_PER_PTE; index++) {
uint64_t *pte = pt_page + index;
- if (pgentry_present(table, (*pte))) {
+ if (table->pgentry_present(*pte)) {
pr_fatal("%s, pte 0x%lx is already present!\n", __func__, vaddr);
} else {
set_pgentry(pte, paddr | prot, table);
@@ -430,7 +430,7 @@ static void add_pde(const uint64_t *pdpte, uint64_t paddr_start, uint64_t vaddr_
if (pde_large(*pde) != 0UL) {
pr_fatal("%s, pde 0x%lx is already present!\n", __func__, vaddr);
} else {
- if (!pgentry_present(table, (*pde))) {
+ if (!table->pgentry_present(*pde)) {
if (table->large_page_support(IA32E_PD, prot) &&
mem_aligned_check(paddr, PDE_SIZE) &&
mem_aligned_check(vaddr, PDE_SIZE) &&
@@ -445,7 +445,7 @@ static void add_pde(const uint64_t *pdpte, uint64_t paddr_start, uint64_t vaddr_
break; /* done */
} else {
void *pt_page = alloc_page(table->pool);
- construct_pgentry(pde, pt_page, table->default_access_right, table);
+ construct_pgentry(pde, pt_page, table->get_default_access_right(), table);
}
}
add_pte(pde, paddr, vaddr, vaddr_end, prot, table);
@@ -479,7 +479,7 @@ static void add_pdpte(const uint64_t *pml4e, uint64_t paddr_start, uint64_t vadd
if (pdpte_large(*pdpte) != 0UL) {
pr_fatal("%s, pdpte 0x%lx is already present!\n", __func__, vaddr);
} else {
- if (!pgentry_present(table, (*pdpte))) {
+ if (!table->pgentry_present(*pdpte)) {
if (table->large_page_support(IA32E_PDPT, prot) &&
mem_aligned_check(paddr, PDPTE_SIZE) &&
mem_aligned_check(vaddr, PDPTE_SIZE) &&
@@ -494,7 +494,7 @@ static void add_pdpte(const uint64_t *pml4e, uint64_t paddr_start, uint64_t vadd
break; /* done */
} else {
void *pd_page = alloc_page(table->pool);
- construct_pgentry(pdpte, pd_page, table->default_access_right, table);
+ construct_pgentry(pdpte, pd_page, table->get_default_access_right(), table);
}
}
add_pde(pdpte, paddr, vaddr, vaddr_end, prot, table);
@@ -565,9 +565,9 @@ void pgtable_add_map(uint64_t *pml4_page, uint64_t paddr_base, uint64_t vaddr_ba
while (vaddr < vaddr_end) {
vaddr_next = (vaddr & PML4E_MASK) + PML4E_SIZE;
pml4e = pml4e_offset(pml4_page, vaddr);
- if (!pgentry_present(table, (*pml4e))) {
+ if (!table->pgentry_present(*pml4e)) {
void *pdpt_page = alloc_page(table->pool);
- construct_pgentry(pml4e, pdpt_page, table->default_access_right, table);
+ construct_pgentry(pml4e, pdpt_page, table->get_default_access_right(), table);
}
add_pdpte(pml4e, paddr, vaddr, vaddr_end, prot, table);
@@ -713,25 +713,25 @@ const uint64_t *pgtable_lookup_entry(uint64_t *pml4_page, uint64_t addr, uint64_
uint64_t *pml4e, *pdpte, *pde, *pte;
pml4e = pml4e_offset(pml4_page, addr);
- present = pgentry_present(table, (*pml4e));
+ present = table->pgentry_present(*pml4e);
if (present) {
pdpte = pdpte_offset(pml4e, addr);
- present = pgentry_present(table, (*pdpte));
+ present = table->pgentry_present(*pdpte);
if (present) {
if (pdpte_large(*pdpte) != 0UL) {
*pg_size = PDPTE_SIZE;
pret = pdpte;
} else {
pde = pde_offset(pdpte, addr);
- present = pgentry_present(table, (*pde));
+ present = table->pgentry_present(*pde);
if (present) {
if (pde_large(*pde) != 0UL) {
*pg_size = PDE_SIZE;
pret = pde;
} else {
pte = pte_offset(pde, addr);
- present = pgentry_present(table, (*pte));
+ present = table->pgentry_present(*pte);
if (present) {
*pg_size = PTE_SIZE;
pret = pte;
@@ -747,4 +747,4 @@ const uint64_t *pgtable_lookup_entry(uint64_t *pml4_page, uint64_t addr, uint64_
/**
* @}
- */
+ */
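pgtable_lookup_entry stops at whichever entry finally maps the address and reports the granularity through *pg_size (PDPTE_SIZE for a 1-GiB large page, PDE_SIZE for 2 MiB, PTE_SIZE for 4 KiB); a caller then adds the offset below that size to the entry's base. A sketch of the caller-side offset math (hypothetical address, not ACRN code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* The three granularities pgtable_lookup_entry can report. */
        uint64_t sizes[3] = { 1UL << 30, 1UL << 21, 1UL << 12 };
        uint64_t addr = 0x7fed12345UL;
        for (int i = 0; i < 3; i++) {
            /* hpa = (entry's address bits) + (addr & (size - 1)) at the level that matched */
            printf("leaf size %#llx -> page offset %#llx\n",
                   (unsigned long long)sizes[i],
                   (unsigned long long)(addr & (sizes[i] - 1UL)));
        }
        return 0;
    }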


@@ -1,26 +1,29 @@
/*
- * Copyright (C) 2018-2025 Intel Corporation.
+ * Copyright (C) 2018-2022 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <types.h>
- #include <bits.h>
+ #include <lib/bits.h>
#include <asm/page.h>
#include <logmsg.h>
#include <util.h>
#include <mmu.h>
#include <asm/init.h>
#include <pgtable.h>
#include <acrn_hv_defs.h>
/**
* @addtogroup hwmgmt_page
*
* @{
*/
- /**
- * @file
- * @brief Implementation of page management.
- *
- * This file provides the core functionality required for allocating and freeing memory pages. It's a fundamental
- * support to manage memory resources.
- */
+ void init_page_pool(struct page_pool *pool, uint64_t *page_base, uint64_t *bitmap_base, int page_num)
+ {
+ uint64_t bitmap_size = page_num / 8;
+ pool->bitmap = (uint64_t *)bitmap_base;
+ pool->start_page = (struct page *)page_base;
+ pool->bitmap_size = bitmap_size / sizeof(uint64_t);
+ pool->dummy_page = NULL;
+ memset(pool->bitmap, 0, bitmap_size);
+ }
struct page *alloc_page(struct page_pool *pool)
{
@@ -70,7 +73,3 @@ void free_page(struct page_pool *pool, struct page *page)
bitmap_clear_non_atomic(bit, pool->bitmap + idx);
spinlock_release(&pool->lock);
}
- /**
- * @}
- */


@@ -7,6 +7,7 @@
#ifndef EPT_H
#define EPT_H
#include <types.h>
+ #include <mmu.h>
typedef void (*pge_handler)(uint64_t *pgentry, uint64_t size);


@@ -29,6 +29,7 @@
#include <asm/e820.h>
#include <asm/vm_config.h>
#include <io_req.h>
+ #include <mmu.h>
#ifdef CONFIG_HYPERV_ENABLED
#include <asm/guest/hyperv.h>
#endif


@@ -174,7 +174,7 @@ void flush_tlb_range(uint64_t addr, uint64_t size);
void flush_invalidate_all_cache(void);
void flush_cacheline(const volatile void *p);
void flush_cache_range(const volatile void *p, uint64_t size);
- void allocate_ppt_pages(void);
+ void ppt_page_pool_init(void);
/**
* @}


@@ -95,45 +95,8 @@ struct page {
uint8_t contents[PAGE_SIZE]; /**< A 4-KByte page in the memory. */
} __aligned(PAGE_SIZE);
- /**
- * @brief Data structure that contains a pool of memory pages.
- *
- * This structure is designed to manage a collection of memory pages, facilitating efficient allocation,
- * deallocation, and reuse of pages. It is typically used in scenarios where memory allocation performance
- * is critical, such as in operating systems or high-performance applications. The page pool aims to minimize
- * the overhead associated with frequent memory page allocations by maintaining a ready-to-use pool of pages.
- * It is used to support the memory management in hypervisor and the extended page-table mechanism for VMs.
- *
- * @consistency N/A
- * @alignment N/A
- *
- * @remark N/A
- */
- struct page_pool {
- struct page *start_page; /**< The first page in the pool. */
- spinlock_t lock; /**< The spinlock to protect simultaneous access of the page pool. */
- /**
- * @brief A pointer to the bitmap that represents the allocation status of each page in the pool.
- *
- * The bitmap is a data structure that represents the allocation status of each page in the pool. Each bit in
- * the bitmap corresponds to a page in the pool. If the bit is set to 1, the page is allocated; otherwise, the
- * page is free. The bitmap is used to track the allocation status of each page in the pool.
- */
- uint64_t *bitmap;
- uint64_t bitmap_size; /**< The number of bitmap. */
- uint64_t last_hint_id; /**< The last bitmap ID that is used to allocate a page. */
- /**
- * @brief A pointer to the dummy page
- *
- * This is used when there's no page available in the pool.
- */
- struct page *dummy_page;
- };
- struct page *alloc_page(struct page_pool *pool);
- void free_page(struct page_pool *pool, struct page *page);
#endif /* PAGE_H */
/**
* @}
- */
+ */


@@ -170,73 +170,6 @@ enum _page_table_level {
IA32E_PT = 3, /**< The Page-Table(PT) level in the page tables. */
};
- /**
- * @brief Data structure that contains the related operations and properties of page table.
- *
- * This structure is used to add/modify/delete page table.
- *
- * @consistency N/A
- * @alignment N/A
- *
- * @remark N/A
- */
- struct pgtable {
- /**
- * @brief Default memory access rights.
- *
- * A linear address can be translated to a physical address by the page tables. The translation is controlled by
- * the memory access rights, as defined by the architecture's memory system design. The default memory access
- * rights can be used to set the memory access rights for a page table entry when the page table is created.
- */
- uint64_t default_access_right;
- /**
- * @brief Mask to check if the page referenced by entry is present.
- *
- * The presence of a page is indicated by specific bits in the entry, as defined by the architecture's memory
- * system design. For example, in ept table entry it's indicated by bit0|bit1|bit2, and in mmu table entry it's
- * indicated by bit 0.
- */
- uint64_t pgentry_present_mask;
- struct page_pool *pool; /**< Pointer to the page pool used for managing pages. */
- /**
- * @brief Function to check if large pages are supported.
- *
- * This function is used to check if large pages are supported for a specific page table level and memory access
- * rights.
- */
- bool (*large_page_support)(enum _page_table_level level, uint64_t prot);
- void (*clflush_pagewalk)(const void *p); /**< Function to flush a page table entry from the cache. */
- void (*tweak_exe_right)(uint64_t *entry); /**< Function to tweak execution rights for an entry. */
- void (*recover_exe_right)(uint64_t *entry); /**< Function to recover execution rights for an entry. */
- };
- /**
- * @brief Check whether the page referenced by the specified paging-structure entry is present or not.
- *
- * This function is used to check if the page referenced is present. A paging-structure entry references a page. The
- * presence of a page is indicated by specific bits in the entry, as defined by the architecture's memory system design.
- * For example, in ept table entry it's indicated by bit0|bit1|bit2, and in mmu table entry it's indicated by bit 0.
- *
- * This function checks whether the page referenced exists based on specific bits.
- *
- * @param[in] table A pointer to the structure pgtable which provides the mask to check whether page referenced is
- * present or not.
- * @param[in] pte The paging-structure entry to check.
- *
- * @return A boolean value indicating if the page referenced by the specified paging-structure entry is present
- *
- * @retval true Indicates the page referenced is present.
- * @retval false Indicates the page referenced is not present.
- *
- * @pre table != NULL
- *
- * @post N/A
- */
- static inline bool pgentry_present(const struct pgtable *table, uint64_t pte)
- {
- return ((table->pgentry_present_mask & (pte)) != 0UL);
- }
/**
* @brief Translate a host physical address to a host virtual address before paging mode enabled.
*
@@ -436,7 +369,7 @@ static inline uint64_t get_pgentry(const uint64_t *pte)
static inline void set_pgentry(uint64_t *ptep, uint64_t pte, const struct pgtable *table)
{
*ptep = pte;
- table->clflush_pagewalk(ptep);
+ table->flush_cache_pagewalk(ptep);
}
/**


@@ -6,9 +6,60 @@
#ifndef MMU_H
#define MMU_H
+ #include <lib/spinlock.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
void set_paging_supervisor(uint64_t base, uint64_t size);
+ /**
+ * @brief Data structure that contains a pool of memory pages.
+ *
+ * This structure is designed to manage a collection of memory pages, facilitating efficient allocation,
+ * deallocation, and reuse of pages. It is typically used in scenarios where memory allocation performance
+ * is critical, such as in operating systems or high-performance applications. The page pool aims to minimize
+ * the overhead associated with frequent memory page allocations by maintaining a ready-to-use pool of pages.
+ * It is used to support the memory management in hypervisor and the extended page-table mechanism for VMs.
+ *
+ * @consistency N/A
+ * @alignment N/A
+ *
+ * @remark N/A
+ */
+ struct page_pool {
+ struct page *start_page; /**< The first page in the pool. */
+ spinlock_t lock; /**< The spinlock to protect simultaneous access of the page pool. */
+ /**
+ * @brief A pointer to the bitmap that represents the allocation status of each page in the pool.
+ *
+ * The bitmap is a data structure that represents the allocation status of each page in the pool. Each bit in
+ * the bitmap corresponds to a page in the pool. If the bit is set to 1, the page is allocated; otherwise, the
+ * page is free. The bitmap is used to track the allocation status of each page in the pool.
+ */
+ uint64_t *bitmap;
+ uint64_t bitmap_size; /**< The number of 64-bit words in the bitmap. */
+ uint64_t last_hint_id; /**< The last bitmap ID that is used to allocate a page. */
+ /**
+ * @brief A pointer to the dummy page
+ *
+ * This is used when there's no page available in the pool.
+ */
+ struct page *dummy_page;
+ };
+ struct pgtable {
+ struct page_pool *pool;
+ uint64_t (*get_default_access_right)(void);
+ uint64_t (*pgentry_present)(uint64_t pte);
+ bool (*large_page_support)(enum _page_table_level level, uint64_t prot);
+ void (*flush_cache_pagewalk)(const void *p);
+ void (*tweak_exe_right)(uint64_t *entry);
+ void (*recover_exe_right)(uint64_t *entry);
+ };
+ void init_page_pool(struct page_pool *pool, uint64_t *page_base,
+ uint64_t *bitmap_base, int page_num);
+ struct page *alloc_page(struct page_pool *pool);
+ void free_page(struct page_pool *pool, struct page *page);
#endif /* MMU_H */
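One field that is easy to misread in this header: bitmap_size counts 64-bit words, not bits or bytes; init_page_pool stores page_num / 8 bytes divided by sizeof(uint64_t), and free_page's word/bit split follows from that. A quick illustrative check (arbitrary numbers):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t page_num = 1024UL;
        uint64_t bitmap_bytes = page_num / 8UL;                  /* 128 bytes */
        uint64_t bitmap_size = bitmap_bytes / sizeof(uint64_t);  /* 16 words */
        uint64_t i = 200UL;                                      /* a page index */
        printf("bitmap_size=%llu; page %llu -> word %llu, bit %llu\n",
               (unsigned long long)bitmap_size, (unsigned long long)i,
               (unsigned long long)(i / 64UL), (unsigned long long)(i % 64UL));
        return 0;
    }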