From 15d68675e9c0ce85312d111eb12f4b1152bc4ba0 Mon Sep 17 00:00:00 2001
From: Li Fei1
Date: Wed, 10 Mar 2021 12:00:25 +0800
Subject: [PATCH] hv: pgtable: separate common APIs for MMU/EPT

We will move the MMU page table related APIs to mmu.c and the EPT
related APIs to EPT.c. The page table module will then only provide
APIs to add/modify/delete/look up page table entries. This patch
separates the common APIs and adds dedicated page table module APIs
for MMU and EPT.
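
To make the intended split concrete, the following stand-alone sketch
shows the hook-table pattern the page table module is moving toward: a
generic walker that touches entries only through per-flavor (PPT vs.
EPT) callbacks. The struct and hook names mirror the diff below; the
enum values, bit constants and the map_demo() walker are simplified
stand-ins for illustration, not ACRN's real definitions.

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum _page_table_level { IA32E_PML4, IA32E_PDPT, IA32E_PD, IA32E_PT };

    /* Hook table: one instance per page table flavor (PPT, EPT). */
    struct pgtable {
            uint64_t default_access_right;
            bool (*pgentry_present)(uint64_t pte);
            bool (*large_page_support)(enum _page_table_level level,
                                       uint64_t prot);
    };

    /* PPT flavor: present is bit 0; 2MB/1GB pages always supported. */
    static bool ppt_pgentry_present(uint64_t pte)
    {
            return (pte & 1UL) != 0UL;
    }

    static bool ppt_large_page_support(enum _page_table_level level,
                                       uint64_t prot)
    {
            (void)prot;
            return (level == IA32E_PD) || (level == IA32E_PDPT);
    }

    static const struct pgtable ppt_pgtable = {
            .default_access_right = 0x7UL,  /* stand-in for P|RW|USER */
            .pgentry_present = ppt_pgentry_present,
            .large_page_support = ppt_large_page_support,
    };

    /* Generic walker: no PPT- or EPT-specific logic, only hooks. */
    static void map_demo(const struct pgtable *table, uint64_t *entry,
                         enum _page_table_level level, uint64_t prot)
    {
            if (!table->pgentry_present(*entry)) {
                    *entry = prot | table->default_access_right;
            }
            if (table->large_page_support(level, prot)) {
                    printf("level %d: large page allowed\n", (int)level);
            }
    }

    int main(void)
    {
            uint64_t entry = 0UL;

            map_demo(&ppt_pgtable, &entry, IA32E_PD, 0x100UL);
            printf("entry = %#" PRIx64 "\n", entry);
            return 0;
    }

An EPT instance would plug in its own hooks (e.g. one that checks the
1GB-page VMX capability), while the walker stays unchanged.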

Tracked-On: #5830
Signed-off-by: Li Fei1
---
 hypervisor/arch/x86/page.c         | 41 +++++++++++++++++++++---------
 hypervisor/include/arch/x86/page.h |  1 +
 2 files changed, 30 insertions(+), 12 deletions(-)

diff --git a/hypervisor/arch/x86/page.c b/hypervisor/arch/x86/page.c
index 70e5723dc..8af66c80c 100644
--- a/hypervisor/arch/x86/page.c
+++ b/hypervisor/arch/x86/page.c
@@ -16,8 +16,6 @@
 #include
 
-#define MAX_PHY_ADDRESS_SPACE	(1UL << MAXIMUM_PA_WIDTH)
-
 /* PPT VA and PA are identical mapping */
 #define PPT_PML4_PAGE_NUM	PML4_PAGE_NUM(MAX_PHY_ADDRESS_SPACE)
 #define PPT_PDPT_PAGE_NUM	PDPT_PAGE_NUM(MAX_PHY_ADDRESS_SPACE)
 #define PPT_PD_PAGE_NUM		PD_PAGE_NUM(MAX_PHY_ADDRESS_SPACE)
@@ -88,7 +86,7 @@ void free_page(struct page_pool *pool, struct page *page)
 }
 
 /* @pre: The PPT and EPT have same page granularity */
-static inline bool large_page_support(enum _page_table_level level, __unused uint64_t prot)
+static inline bool ppt_large_page_support(enum _page_table_level level, __unused uint64_t prot)
 {
 	bool support;
 
@@ -112,17 +110,17 @@ static inline uint64_t ppt_pgentry_present(uint64_t pte)
 	return pte & PAGE_PRESENT;
 }
 
-static inline void nop_tweak_exe_right(uint64_t *entry __attribute__((unused))) {}
-static inline void nop_recover_exe_right(uint64_t *entry __attribute__((unused))) {}
+static inline void ppt_nop_tweak_exe_right(uint64_t *entry __attribute__((unused))) {}
+static inline void ppt_nop_recover_exe_right(uint64_t *entry __attribute__((unused))) {}
 
 const struct pgtable ppt_pgtable = {
 	.default_access_right = (PAGE_PRESENT | PAGE_RW | PAGE_USER),
 	.pool = &ppt_page_pool,
-	.large_page_support = large_page_support,
+	.large_page_support = ppt_large_page_support,
 	.pgentry_present = ppt_pgentry_present,
 	.clflush_pagewalk = ppt_clflush_pagewalk,
-	.tweak_exe_right = nop_tweak_exe_right,
-	.recover_exe_right = nop_recover_exe_right,
+	.tweak_exe_right = ppt_nop_tweak_exe_right,
+	.recover_exe_right = ppt_nop_recover_exe_right,
 };
 
 /* EPT address space will not beyond the platform physical address space */
@@ -186,6 +184,22 @@ void reserve_buffer_for_ept_pages(void)
 	}
 }
 
+/* @pre: The PPT and EPT have same page granularity */
+static inline bool ept_large_page_support(enum _page_table_level level, __unused uint64_t prot)
+{
+	bool support;
+
+	if (level == IA32E_PD) {
+		support = true;
+	} else if (level == IA32E_PDPT) {
+		support = pcpu_has_vmx_ept_cap(VMX_EPT_1GB_PAGE);
+	} else {
+		support = false;
+	}
+
+	return support;
+}
+
 /*
  * Pages without execution right, such as MMIO, can always use large page
  * base on hardware capability, even if the VM is an RTVM. This can save
@@ -196,7 +210,7 @@ static inline bool use_large_page(enum _page_table_level level, uint64_t prot)
 	bool ret = false;	/* for code page */
 
 	if ((prot & EPT_EXE) == 0UL) {
-		ret = large_page_support(level, prot);
+		ret = ept_large_page_support(level, prot);
 	}
 
 	return ret;
@@ -212,6 +226,9 @@ static inline void ept_clflush_pagewalk(const void* etry)
 	iommu_flush_cache(etry, sizeof(uint64_t));
 }
 
+static inline void ept_nop_tweak_exe_right(uint64_t *entry __attribute__((unused))) {}
+static inline void ept_nop_recover_exe_right(uint64_t *entry __attribute__((unused))) {}
+
 /* The function is used to disable execute right for (2MB / 1GB)large pages in EPT */
 static inline void ept_tweak_exe_right(uint64_t *entry)
 {
@@ -243,7 +260,7 @@ void init_ept_pgtable(struct pgtable *table, uint16_t vm_id)
 	table->default_access_right = EPT_RWX;
 	table->pgentry_present = ept_pgentry_present;
 	table->clflush_pagewalk = ept_clflush_pagewalk;
-	table->large_page_support = large_page_support;
+	table->large_page_support = ept_large_page_support;
 
 	/* Mitigation for issue "Machine Check Error on Page Size Change" */
 	if (is_ept_force_4k_ipage()) {
@@ -254,7 +271,7 @@ void init_ept_pgtable(struct pgtable *table, uint16_t vm_id)
 			table->large_page_support = use_large_page;
 		}
 	} else {
-		table->tweak_exe_right = nop_tweak_exe_right;
-		table->recover_exe_right = nop_recover_exe_right;
+		table->tweak_exe_right = ept_nop_tweak_exe_right;
+		table->recover_exe_right = ept_nop_recover_exe_right;
 	}
 }
diff --git a/hypervisor/include/arch/x86/page.h b/hypervisor/include/arch/x86/page.h
index e0a27f08c..d1170fe2d 100644
--- a/hypervisor/include/arch/x86/page.h
+++ b/hypervisor/include/arch/x86/page.h
@@ -15,6 +15,7 @@
 #define PAGE_MASK	0xFFFFFFFFFFFFF000UL
 
 #define MAXIMUM_PA_WIDTH	39U	/* maximum physical-address width */
+#define MAX_PHY_ADDRESS_SPACE	(1UL << MAXIMUM_PA_WIDTH)
 
 /* size of the low MMIO address space: 2GB */
 #define PLATFORM_LO_MMIO_SIZE	0x80000000UL
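
For context on the mitigation path above: when is_ept_force_4k_ipage()
is true, executable mappings must not use large EPT pages, so
tweak_exe_right() strips the execute right from a large-page entry and
recover_exe_right() restores it once the range has been split into 4K
pages. A minimal stand-alone illustration of that bit manipulation
follows; the R/W/X bit positions match the EPT entry format, but the
macro names are local stand-ins rather than ACRN's headers.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* EPT entry low bits: bit 0 = read, bit 1 = write, bit 2 = execute. */
    #define EPT_RD  (1UL << 0U)
    #define EPT_WR  (1UL << 1U)
    #define EPT_EXE (1UL << 2U)

    /* Disable execute right on a (2MB / 1GB) large-page entry. */
    static void ept_tweak_exe_right(uint64_t *entry)
    {
            *entry &= ~EPT_EXE;
    }

    /* Restore execute right after the large page is split into 4K pages. */
    static void ept_recover_exe_right(uint64_t *entry)
    {
            *entry |= EPT_EXE;
    }

    int main(void)
    {
            uint64_t entry = EPT_RD | EPT_WR | EPT_EXE;

            ept_tweak_exe_right(&entry);
            printf("large page entry: %#" PRIx64 " (EXE cleared)\n", entry);
            ept_recover_exe_right(&entry);
            printf("4K page entry:    %#" PRIx64 " (EXE restored)\n", entry);
            return 0;
    }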