mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-09-21 16:57:20 +00:00
hv: page: use dynamic page allocation for pagetable mapping
For FuSa's case, we removed all dynamic memory allocation use in the ACRN HV. Instead, we use static memory allocation or embedded data structures. For pagetable pages, we prefer to use an index (hva for MMU, gpa for EPT) to get a page from a special page pool. The special page pool should be big enough for each possible index. This is not a big problem when we don't support 64-bit MMIO. Without 64-bit MMIO support, we could use the index to search addresses not larger than DRAM_SIZE + 4G. However, if ACRN plans to support 64-bit MMIO in the SOS, we can not use static memory allocation any more. This is because there's a very large hole between the top DRAM address and the bottom 64-bit MMIO address. We could not reserve that many pages for pagetable mapping, as the number of CPU physical address bits may be very large. This patch uses dynamic page allocation for pagetable mapping. We also need to reserve a big enough page pool up front. For the HV MMU, we don't use 4K-granularity page table mappings, so we need to reserve PML4, PDPT and PD pages according to the maximum physical address space (PPT va and pa are an identity mapping); for each VM EPT, we reserve PML4, PDPT and PD pages according to the maximum physical address space too (the EPT address space can't go beyond the physical address space), and we reserve PT pages based on the real use cases of DRAM, low MMIO and high MMIO. Signed-off-by: Li Fei1 <fei1.li@intel.com> Tracked-On: #5788
This commit is contained in:
@@ -18,18 +18,6 @@ typedef void (*pge_handler)(uint64_t *pgentry, uint64_t size);
|
||||
#define INVALID_HPA (0x1UL << 52U)
|
||||
#define INVALID_GPA (0x1UL << 52U)
|
||||
/* External Interfaces */
|
||||
/**
|
||||
* @brief Check whether pagetable pages is reserved enough for the GPA range or not
|
||||
*
|
||||
* @param[in] vm the pointer that points to VM data structure
|
||||
* @param[in] base The specified start guest physical address of guest
|
||||
* physical memory region
|
||||
* @param[in] size The size of guest physical memory region
|
||||
*
|
||||
* @retval true if pagetable pages is reserved enough for the GPA range, false otherwise.
|
||||
*/
|
||||
bool ept_is_mr_valid(const struct acrn_vm *vm, uint64_t base, uint64_t size);
|
||||
|
||||
/**
|
||||
* @brief Check if the GPA range is guest valid GPA or not
|
||||
*
|
||||
@@ -173,4 +161,13 @@ void walk_ept_table(struct acrn_vm *vm, pge_handler cb);
|
||||
*/
|
||||
int32_t ept_misconfig_vmexit_handler(__unused struct acrn_vcpu *vcpu);
|
||||
|
||||
/**
|
||||
* @brief allocate a page from the VM's EPT pagetable page pool
|
||||
*
|
||||
* @param[in] vm the pointer that points to VM data structure
|
||||
*
|
||||
* @retval a page pointer if there's available used pages in the VM's EPT
|
||||
* pagetable page pool, null otherwise.
|
||||
*/
|
||||
struct page *alloc_ept_page(struct acrn_vm *vm);
|
||||
#endif /* EPT_H */
|
||||
|
@@ -7,12 +7,15 @@
|
||||
#ifndef PAGE_H
|
||||
#define PAGE_H
|
||||
|
||||
#include <spinlock.h>
|
||||
#include <board_info.h>
|
||||
|
||||
#define PAGE_SHIFT 12U
|
||||
#define PAGE_SIZE (1U << PAGE_SHIFT)
|
||||
#define PAGE_MASK 0xFFFFFFFFFFFFF000UL
|
||||
|
||||
#define MAXIMUM_PA_WIDTH 39U /* maximum physical-address width */
|
||||
|
||||
/* size of the low MMIO address space: 2GB */
|
||||
#define PLATFORM_LO_MMIO_SIZE 0x80000000UL
|
||||
|
||||
@@ -24,32 +27,6 @@
|
||||
#define PD_PAGE_NUM(size) (((size) + PDPTE_SIZE - 1UL) >> PDPTE_SHIFT)
|
||||
#define PT_PAGE_NUM(size) (((size) + PDE_SIZE - 1UL) >> PDE_SHIFT)
|
||||
|
||||
/*
|
||||
* The size of the guest physical address space, covered by the EPT page table of a VM.
|
||||
* With the assumptions:
|
||||
* - The GPA of DRAM & MMIO are contiguous.
|
||||
* - Guest OS won't re-program device MMIO bars to the address not covered by
|
||||
* this EPT_ADDRESS_SPACE.
|
||||
*/
|
||||
#define EPT_ADDRESS_SPACE(size) (((size) > MEM_2G) ? \
|
||||
((size) + PLATFORM_LO_MMIO_SIZE + PLATFORM_HI_MMIO_SIZE) \
|
||||
: (MEM_2G + PLATFORM_LO_MMIO_SIZE + PLATFORM_HI_MMIO_SIZE))
|
||||
|
||||
#define PTDEV_HI_MMIO_START ((CONFIG_UOS_RAM_SIZE > MEM_2G) ? \
|
||||
(CONFIG_UOS_RAM_SIZE + PLATFORM_LO_MMIO_SIZE) : (MEM_2G + PLATFORM_LO_MMIO_SIZE))
|
||||
|
||||
#define PRE_VM_EPT_ADDRESS_SPACE(size) (PTDEV_HI_MMIO_START + HI_MMIO_SIZE)
|
||||
|
||||
#define TOTAL_EPT_4K_PAGES_SIZE (PRE_VM_NUM*(PT_PAGE_NUM(PRE_VM_EPT_ADDRESS_SPACE(CONFIG_UOS_RAM_SIZE))*MEM_4K)) + \
|
||||
(SOS_VM_NUM*(PT_PAGE_NUM(EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE))*MEM_4K)) + \
|
||||
(MAX_POST_VM_NUM*(PT_PAGE_NUM(EPT_ADDRESS_SPACE(CONFIG_UOS_RAM_SIZE))*MEM_4K))
|
||||
|
||||
#define TRUSTY_PML4_PAGE_NUM(size) (1UL)
|
||||
#define TRUSTY_PDPT_PAGE_NUM(size) (1UL)
|
||||
#define TRUSTY_PD_PAGE_NUM(size) (PD_PAGE_NUM(size))
|
||||
#define TRUSTY_PT_PAGE_NUM(size) (PT_PAGE_NUM(size))
|
||||
#define TRUSTY_PGTABLE_PAGE_NUM(size) \
|
||||
(TRUSTY_PML4_PAGE_NUM(size) + TRUSTY_PDPT_PAGE_NUM(size) + TRUSTY_PD_PAGE_NUM(size) + TRUSTY_PT_PAGE_NUM(size))
|
||||
|
||||
/**
|
||||
* @brief Page tables level in IA32 paging mode
|
||||
@@ -79,32 +56,21 @@ struct page {
|
||||
uint8_t contents[PAGE_SIZE];
|
||||
} __aligned(PAGE_SIZE);
|
||||
|
||||
union pgtable_pages_info {
|
||||
struct {
|
||||
struct page *pml4_base;
|
||||
struct page *pdpt_base;
|
||||
struct page *pd_base;
|
||||
struct page *pt_base;
|
||||
} ppt;
|
||||
struct {
|
||||
uint64_t top_address_space;
|
||||
struct page *nworld_pml4_base;
|
||||
struct page *nworld_pdpt_base;
|
||||
struct page *nworld_pd_base;
|
||||
struct page *nworld_pt_base;
|
||||
struct page *sworld_pgtable_base;
|
||||
} ept;
|
||||
struct page_pool {
|
||||
struct page *start_page;
|
||||
spinlock_t lock;
|
||||
uint64_t bitmap_size;
|
||||
uint64_t *bitmap;
|
||||
uint64_t last_hint_id;
|
||||
|
||||
struct page *dummy_page;
|
||||
};
|
||||
|
||||
struct memory_ops {
|
||||
union pgtable_pages_info *info;
|
||||
struct page_pool *pool;
|
||||
bool (*large_page_support)(enum _page_table_level level);
|
||||
uint64_t (*get_default_access_right)(void);
|
||||
uint64_t (*pgentry_present)(uint64_t pte);
|
||||
struct page *(*get_pml4_page)(const union pgtable_pages_info *info);
|
||||
struct page *(*get_pdpt_page)(const union pgtable_pages_info *info, uint64_t gpa);
|
||||
struct page *(*get_pd_page)(const union pgtable_pages_info *info, uint64_t gpa);
|
||||
struct page *(*get_pt_page)(const union pgtable_pages_info *info, uint64_t gpa);
|
||||
void (*clflush_pagewalk)(const void *p);
|
||||
void (*tweak_exe_right)(uint64_t *entry);
|
||||
void (*recover_exe_right)(uint64_t *entry);
|
||||
@@ -112,9 +78,7 @@ struct memory_ops {
|
||||
|
||||
extern const struct memory_ops ppt_mem_ops;
|
||||
void init_ept_mem_ops(struct memory_ops *mem_ops, uint16_t vm_id);
|
||||
struct page *alloc_page(struct page_pool *pool);
|
||||
void *get_reserve_sworld_memory_base(void);
|
||||
|
||||
#ifdef CONFIG_LAST_LEVEL_EPT_AT_BOOT
|
||||
void reserve_buffer_for_ept_pages(void);
|
||||
#endif
|
||||
#endif /* PAGE_H */
|
||||
|
Reference in New Issue
Block a user