From 5c71ca456a5dda90289a265c4cc3ea3f76272cb7 Mon Sep 17 00:00:00 2001
From: Li Fei1
Date: Wed, 10 Mar 2021 13:43:12 +0800
Subject: [PATCH] hv: pgtable: move the MMU page table related APIs to mmu.c

Move the MMU page table related APIs to mmu.c. The page module only
provides APIs to allocate/free pages for page table pages. The pagetable
module only provides APIs to add/modify/delete/lookup page table entries.
The page pool and the page table related APIs for the MMU should be
defined in the MMU module.

Tracked-On: #5830
Signed-off-by: Li Fei1
Reviewed-by: Jason Chen CJ
---
 hypervisor/arch/x86/mmu.c          | 58 ++++++++++++++++++++++++++++++
 hypervisor/arch/x86/page.c         | 58 ------------------------------
 hypervisor/include/arch/x86/page.h |  1 -
 3 files changed, 58 insertions(+), 59 deletions(-)

diff --git a/hypervisor/arch/x86/mmu.c b/hypervisor/arch/x86/mmu.c
index ae3d103d7..534ffe81b 100644
--- a/hypervisor/arch/x86/mmu.c
+++ b/hypervisor/arch/x86/mmu.c
@@ -43,6 +43,64 @@ static void *ppt_mmu_pml4_addr;
 
 static uint8_t sanitized_page[PAGE_SIZE] __aligned(PAGE_SIZE);
 
+/* PPT VA and PA are identical mapping */
+#define PPT_PML4_PAGE_NUM	PML4_PAGE_NUM(MAX_PHY_ADDRESS_SPACE)
+#define PPT_PDPT_PAGE_NUM	PDPT_PAGE_NUM(MAX_PHY_ADDRESS_SPACE)
+#define PPT_PD_PAGE_NUM	PD_PAGE_NUM(MAX_PHY_ADDRESS_SPACE)
+#define PPT_PT_PAGE_NUM	0UL	/* not support 4K granularity page mapping */
+/* must be a multiple of 64 */
+#define PPT_PAGE_NUM	(roundup((PPT_PML4_PAGE_NUM + PPT_PDPT_PAGE_NUM + \
+			PPT_PD_PAGE_NUM + PPT_PT_PAGE_NUM), 64U))
+static struct page ppt_pages[PPT_PAGE_NUM];
+static uint64_t ppt_page_bitmap[PPT_PAGE_NUM / 64];
+
+/* ppt: primary page pool */
+static struct page_pool ppt_page_pool = {
+	.start_page = ppt_pages,
+	.bitmap_size = PPT_PAGE_NUM / 64,
+	.bitmap = ppt_page_bitmap,
+	.last_hint_id = 0UL,
+	.dummy_page = NULL,
+};
+
+/* @pre: The PPT and EPT have same page granularity */
+static inline bool ppt_large_page_support(enum _page_table_level level, __unused uint64_t prot)
+{
+	bool support;
+
+	if (level == IA32E_PD) {
+		support = true;
+	} else if (level == IA32E_PDPT) {
+		support = pcpu_has_vmx_ept_cap(VMX_EPT_1GB_PAGE);
+	} else {
+		support = false;
+	}
+
+	return support;
+}
+
+static inline void ppt_clflush_pagewalk(const void* entry __attribute__((unused)))
+{
+}
+
+static inline uint64_t ppt_pgentry_present(uint64_t pte)
+{
+	return pte & PAGE_PRESENT;
+}
+
+static inline void ppt_nop_tweak_exe_right(uint64_t *entry __attribute__((unused))) {}
+static inline void ppt_nop_recover_exe_right(uint64_t *entry __attribute__((unused))) {}
+
+static const struct pgtable ppt_pgtable = {
+	.default_access_right = (PAGE_PRESENT | PAGE_RW | PAGE_USER),
+	.pool = &ppt_page_pool,
+	.large_page_support = ppt_large_page_support,
+	.pgentry_present = ppt_pgentry_present,
+	.clflush_pagewalk = ppt_clflush_pagewalk,
+	.tweak_exe_right = ppt_nop_tweak_exe_right,
+	.recover_exe_right = ppt_nop_recover_exe_right,
+};
+
 #define INVEPT_TYPE_SINGLE_CONTEXT	1UL
 #define INVEPT_TYPE_ALL_CONTEXTS	2UL
 #define VMFAIL_INVALID_EPT_VPID	\
diff --git a/hypervisor/arch/x86/page.c b/hypervisor/arch/x86/page.c
index 8af66c80c..18cd6a29c 100644
--- a/hypervisor/arch/x86/page.c
+++ b/hypervisor/arch/x86/page.c
@@ -16,26 +16,6 @@
 
 #include 
 
-/* PPT VA and PA are identical mapping */
-#define PPT_PML4_PAGE_NUM	PML4_PAGE_NUM(MAX_PHY_ADDRESS_SPACE)
-#define PPT_PDPT_PAGE_NUM	PDPT_PAGE_NUM(MAX_PHY_ADDRESS_SPACE)
-#define PPT_PD_PAGE_NUM	PD_PAGE_NUM(MAX_PHY_ADDRESS_SPACE)
-#define PPT_PT_PAGE_NUM	0UL	/* not support 4K granularity page mapping */
-/* must be a multiple of 64 */
-#define PPT_PAGE_NUM	(roundup((PPT_PML4_PAGE_NUM + PPT_PDPT_PAGE_NUM + \
-			PPT_PD_PAGE_NUM + PPT_PT_PAGE_NUM), 64U))
-static struct page ppt_pages[PPT_PAGE_NUM];
-static uint64_t ppt_page_bitmap[PPT_PAGE_NUM / 64];
-
-/* ppt: pripary page pool */
-static struct page_pool ppt_page_pool = {
-	.start_page = ppt_pages,
-	.bitmap_size = PPT_PAGE_NUM / 64,
-	.bitmap = ppt_page_bitmap,
-	.last_hint_id = 0UL,
-	.dummy_page = NULL,
-};
-
 struct page *alloc_page(struct page_pool *pool)
 {
 	struct page *page = NULL;
@@ -85,44 +65,6 @@ void free_page(struct page_pool *pool, struct page *page)
 	spinlock_release(&pool->lock);
 }
 
-/* @pre: The PPT and EPT have same page granularity */
-static inline bool ppt_large_page_support(enum _page_table_level level, __unused uint64_t prot)
-{
-	bool support;
-
-	if (level == IA32E_PD) {
-		support = true;
-	} else if (level == IA32E_PDPT) {
-		support = pcpu_has_vmx_ept_cap(VMX_EPT_1GB_PAGE);
-	} else {
-		support = false;
-	}
-
-	return support;
-}
-
-static inline void ppt_clflush_pagewalk(const void* entry __attribute__((unused)))
-{
-}
-
-static inline uint64_t ppt_pgentry_present(uint64_t pte)
-{
-	return pte & PAGE_PRESENT;
-}
-
-static inline void ppt_nop_tweak_exe_right(uint64_t *entry __attribute__((unused))) {}
-static inline void ppt_nop_recover_exe_right(uint64_t *entry __attribute__((unused))) {}
-
-const struct pgtable ppt_pgtable = {
-	.default_access_right = (PAGE_PRESENT | PAGE_RW | PAGE_USER),
-	.pool = &ppt_page_pool,
-	.large_page_support = ppt_large_page_support,
-	.pgentry_present = ppt_pgentry_present,
-	.clflush_pagewalk = ppt_clflush_pagewalk,
-	.tweak_exe_right = ppt_nop_tweak_exe_right,
-	.recover_exe_right = ppt_nop_recover_exe_right,
-};
-
 /* EPT address space will not beyond the platform physical address space */
 #define EPT_PML4_PAGE_NUM	PML4_PAGE_NUM(MAX_PHY_ADDRESS_SPACE)
 #define EPT_PDPT_PAGE_NUM	PDPT_PAGE_NUM(MAX_PHY_ADDRESS_SPACE)
diff --git a/hypervisor/include/arch/x86/page.h b/hypervisor/include/arch/x86/page.h
index d1170fe2d..75eaf36cf 100644
--- a/hypervisor/include/arch/x86/page.h
+++ b/hypervisor/include/arch/x86/page.h
@@ -77,7 +77,6 @@ struct pgtable {
 	void (*recover_exe_right)(uint64_t *entry);
 };
 
-extern const struct pgtable ppt_pgtable;
 void init_ept_pgtable(struct pgtable *table, uint16_t vm_id);
 struct page *alloc_page(struct page_pool *pool);
 void free_page(struct page_pool *pool, struct page *page);
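
Note on the relocated ppt_page_pool: the pool is a small bitmap allocator. alloc_page() scans bitmap (starting near last_hint_id) for a clear bit, sets it, and returns the matching entry of start_page; free_page() clears the bit again. The sketch below is only an illustration of that scheme, not hypervisor code: the field names mirror the patch, while POOL_PAGES, the 64-page pool size, and the main() driver are invented for the example, and the real pool additionally takes a spinlock and carries a dummy_page pointer for the out-of-pages case (both omitted here).

#include <stdint.h>
#include <stdio.h>

#define POOL_PAGE_SIZE	4096U
#define POOL_PAGES	64U	/* kept a multiple of 64, as the patch requires */

struct page {
	uint8_t contents[POOL_PAGE_SIZE];
};

struct page_pool {
	struct page *start_page;	/* backing array of page-table pages */
	uint64_t *bitmap;		/* one bit per page: 1 = in use */
	uint64_t bitmap_size;		/* number of 64-bit words in bitmap */
	uint64_t last_hint_id;		/* word where the previous allocation hit */
};

static struct page pool_pages[POOL_PAGES];
static uint64_t pool_bitmap[POOL_PAGES / 64U];
static struct page_pool pool = {
	.start_page = pool_pages,
	.bitmap = pool_bitmap,
	.bitmap_size = POOL_PAGES / 64U,
	.last_hint_id = 0UL,
};

/* Allocate one page: find a clear bit, set it, return the matching page. */
static struct page *alloc_page(struct page_pool *p)
{
	uint64_t n, idx, bit;

	for (n = 0UL; n < p->bitmap_size; n++) {
		idx = (p->last_hint_id + n) % p->bitmap_size;
		if (~p->bitmap[idx] != 0UL) {	/* this word still has a free bit */
			bit = (uint64_t)__builtin_ctzll(~p->bitmap[idx]);
			p->bitmap[idx] |= (1UL << bit);
			p->last_hint_id = idx;
			return p->start_page + ((idx * 64UL) + bit);
		}
	}
	return NULL;	/* pool exhausted */
}

/* Free one page: clear its bit so it can be handed out again. */
static void free_page(struct page_pool *p, struct page *page)
{
	uint64_t n = (uint64_t)(page - p->start_page);

	p->bitmap[n / 64UL] &= ~(1UL << (n % 64UL));
}

int main(void)
{
	struct page *pg = alloc_page(&pool);

	printf("allocated page index %td\n", pg - pool_pages);
	free_page(&pool, pg);
	return 0;
}

Making the pool and ppt_pgtable static in mmu.c (and dropping the extern declaration from page.h) means only the MMU code can reach the PPT pool, which is the point of the move: page.c keeps only the generic alloc_page()/free_page() pool operations.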