Files
acrn-hypervisor/hypervisor/include/common/mmu.h
hangliu1 c421a9557b hv:multiarch: move main memory interface to common
Move the interfaces below to common code:
pgtable_lookup_entry/pgtable_add_map/pgtable_modify_or_del_map

Tracked-On: #8831
Signed-off-by: hangliu1 <hang1.liu@intel.com>
Reviewed-by: Fei Li <fei1.li@intel.com>
Acked-by: Wang, Yu1 <yu1.wang@intel.com>
2025-10-21 14:14:55 +08:00

97 lines
3.6 KiB
C

/*
* Copyright (C) 2023-2025 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef MMU_H
#define MMU_H
#include <lib/spinlock.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
/**
 * @brief Mark the mapping for [base, base + size) as supervisor-only.
 *
 * NOTE(review): behavior inferred from the name only — the implementation is
 * not visible in this header; confirm against the definition.
 *
 * @param[in] base Start address of the region.
 * @param[in] size Size of the region in bytes.
 */
void set_paging_supervisor(uint64_t base, uint64_t size);
/**
 * @brief Data structure that contains a pool of memory pages.
 *
 * This structure is designed to manage a collection of memory pages, facilitating efficient allocation,
 * deallocation, and reuse of pages. It is typically used in scenarios where memory allocation performance
 * is critical, such as in operating systems or high-performance applications. The page pool aims to minimize
 * the overhead associated with frequent memory page allocations by maintaining a ready-to-use pool of pages.
 * It is used to support the memory management in hypervisor and the extended page-table mechanism for VMs.
 *
 * @consistency N/A
 * @alignment N/A
 *
 * @remark N/A
 */
struct page_pool {
struct page *start_page; /**< The first page in the pool. */
spinlock_t lock; /**< The spinlock to protect simultaneous access of the page pool. */
/**
 * @brief A pointer to the bitmap that represents the allocation status of each page in the pool.
 *
 * The bitmap is a data structure that represents the allocation status of each page in the pool. Each bit in
 * the bitmap corresponds to a page in the pool. If the bit is set to 1, the page is allocated; otherwise, the
 * page is free. The bitmap is used to track the allocation status of each page in the pool.
 */
uint64_t *bitmap;
uint64_t bitmap_size; /**< Size of the bitmap array (presumably in 64-bit words — confirm against init_page_pool). */
uint64_t last_hint_id; /**< The last bitmap ID that is used to allocate a page. */
/**
 * @brief A pointer to the dummy page
 *
 * This is used when there's no page available in the pool.
 */
struct page *dummy_page;
};
/**
 * @brief Architecture-neutral description of a page-table format.
 *
 * Bundles the page pool backing page-table allocations with the hooks that
 * abstract architecture-specific page-table entry handling, so the common
 * pgtable_* interfaces below can operate without arch knowledge.
 */
struct pgtable {
struct page_pool *pool; /**< Pool from which page-table pages are allocated. */
uint64_t (*get_default_access_right)(void); /**< Returns the default access-right bits for an entry. */
uint64_t (*pgentry_present)(uint64_t pte); /**< Presumably non-zero when @p pte is marked present — confirm against arch implementation. */
bool (*large_page_support)(enum _page_table_level level, uint64_t prot); /**< Whether a large page may be used at @p level with @p prot. */
void (*flush_cache_pagewalk)(const void *p); /**< Flushes the cache for a modified entry; invoked after the store in set_pgentry(). */
void (*tweak_exe_right)(uint64_t *entry); /**< Adjusts the execute permission of an entry in place — arch-specific; semantics not visible here. */
void (*recover_exe_right)(uint64_t *entry); /**< Presumably undoes tweak_exe_right() on an entry — confirm against arch implementation. */
};
/*
 * Read a generic page table entry ("pgentry") from the given slot.
 */
static inline uint64_t get_pgentry(const uint64_t *pte)
{
	const uint64_t entry = *pte;

	return entry;
}
/*
 * Write a generic page table entry ("pgentry") and then flush the cache for
 * the modified entry via the arch-provided flush_cache_pagewalk hook.
 *
 * The store must precede the flush; do not reorder these statements.
 */
static inline void set_pgentry(uint64_t *ptep, uint64_t pte, const struct pgtable *table)
{
*ptep = pte;
table->flush_cache_pagewalk(ptep);
}
/* Initialize @pool to manage @page_num pages starting at @page_base, with allocation state tracked at @bitmap_base. */
void init_page_pool(struct page_pool *pool, uint64_t *page_base,
uint64_t *bitmap_base, int page_num);
/* Allocate one page from @pool; presumably falls back to pool->dummy_page when exhausted (see struct page_pool) — confirm. */
struct page *alloc_page(struct page_pool *pool);
/* Return @page to @pool for reuse. */
void free_page(struct page_pool *pool, struct page *page);
/* Initialize the sanitized page at @sanitized_page / host physical address @hpa — semantics not visible here; confirm. */
void init_sanitized_page(uint64_t *sanitized_page, uint64_t hpa);
/* Reset the single entry at @ptep to the sanitized state defined by @table. */
void sanitize_pte_entry(uint64_t *ptep, const struct pgtable *table);
/* Reset every entry in the page-table page @pt_page to the sanitized state defined by @table. */
void sanitize_pte(uint64_t *pt_page, const struct pgtable *table);
/* Allocate and return the root page of a new page table described by @table. */
void *pgtable_create_root(const struct pgtable *table);
/* Look up the leaf entry mapping @addr under root @pml4_page; on success *@pg_size presumably receives the mapped page size — confirm. */
const uint64_t *pgtable_lookup_entry(uint64_t *pml4_page, uint64_t addr,
uint64_t *pg_size, const struct pgtable *table);
/* Map [vaddr_base, vaddr_base + size) to [paddr_base, ...) with permissions @prot under root @pml4_page. */
void pgtable_add_map(uint64_t *pml4_page, uint64_t paddr_base,
uint64_t vaddr_base, uint64_t size,
uint64_t prot, const struct pgtable *table);
/* Modify (set @prot_set, clear @prot_clr) or delete — selected by @type — the mappings covering [vaddr_base, vaddr_base + size). */
void pgtable_modify_or_del_map(uint64_t *pml4_page, uint64_t vaddr_base,
uint64_t size, uint64_t prot_set, uint64_t prot_clr,
const struct pgtable *table, uint32_t type);
#endif /* MMU_H */