hv: ept: build 4KB page mapping in EPT for code pages of rtvm

The RTVM is forced to use 4KB pages to mitigate CVE-2018-12207 and to avoid the
performance jitter that may be introduced by splitting a large page into 4KB pages
on demand. This worked fine on previous hardware platforms, where the address space
of the RTVM is relatively small. However, it becomes a problem on platforms that
support 64-bit high MMIO space, which can be extremely large and therefore consumes
a large number of EPT page-table pages.
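
To get a feel for the cost, here is a rough back-of-the-envelope sketch (standalone C,
not ACRN code) that counts the EPT page-table pages needed to map a hypothetical 256GB
high-MMIO window with 4KB versus 2MB pages; the window size is an assumption chosen
only for illustration:

/*
 * Back-of-the-envelope sketch (not ACRN code): page-table pages needed to
 * map an assumed 256GB MMIO window with 4KB vs. 2MB EPT mappings.
 */
#include <stdio.h>

#define ENTRIES_PER_TABLE 512ULL        /* 512 x 8-byte entries per 4KB table page */
#define SZ_4K (1ULL << 12)
#define SZ_2M (1ULL << 21)
#define SZ_1G (1ULL << 30)

int main(void)
{
    unsigned long long region = 256ULL << 30;   /* assumed 256GB high-MMIO window */

    /* 4KB mappings: one PT page per 2MB, one PD page per 1GB,
     * and one PDPT page per 512GB of guest-physical address space. */
    unsigned long long pt = region / SZ_2M;
    unsigned long long pd = region / SZ_1G;
    unsigned long long pdpt = (region + (ENTRIES_PER_TABLE * SZ_1G) - 1ULL) /
                              (ENTRIES_PER_TABLE * SZ_1G);

    printf("4KB mappings: %llu table pages (~%llu MB)\n",
           pt + pd + pdpt, ((pt + pd + pdpt) * SZ_4K) >> 20);

    /* With 2MB mappings the whole PT level disappears. */
    printf("2MB mappings: %llu table pages\n", pd + pdpt);
    return 0;
}

For a window of that size, 4KB-only mappings cost roughly half a gigabyte of EPT
page-table pages, while 2MB mappings cost about one megabyte.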

This patch optimizes the mapping by using large pages for data-only pages, such as
MMIO spaces, even for the RTVM.
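
The policy in the diff below can be summarized as: a mapping may use a large page only
if it is not executable and the hardware/level supports it. A minimal, self-contained
sketch of that decision could look like the following; PROT_EXEC, pt_level,
hw_large_page_support() and rtvm_use_large_page() are stand-in names for illustration,
not the actual ACRN identifiers.

/*
 * Minimal sketch of the page-size policy, using stand-in names and
 * constants; the real logic is use_large_page() in the diff below.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PROT_EXEC (1UL << 2)                 /* stand-in for EPT_EXE */

enum pt_level { LVL_PDPT, LVL_PD, LVL_PT };  /* 1GB, 2MB, 4KB mappings */

/* Assume the hardware supports 2MB and 1GB EPT mappings. */
static bool hw_large_page_support(enum pt_level level)
{
    return (level == LVL_PD) || (level == LVL_PDPT);
}

/* RTVM policy: only non-executable (data/MMIO) ranges may use large pages,
 * so code pages keep the 4KB mappings required by the CVE-2018-12207
 * mitigation. */
static bool rtvm_use_large_page(enum pt_level level, uint64_t prot)
{
    return ((prot & PROT_EXEC) == 0UL) && hw_large_page_support(level);
}

int main(void)
{
    printf("MMIO (no exec) at PD level: %d\n", rtvm_use_large_page(LVL_PD, 0UL));
    printf("code (exec)    at PD level: %d\n", rtvm_use_large_page(LVL_PD, PROT_EXEC));
    return 0;
}

Keying the check on the EPT execute bit means no separate bookkeeping is needed to
distinguish code ranges from data ranges.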

Signed-off-by: Li Fei1 <fei1.li@intel.com>
Tracked-On: #5788
Li Fei1 2021-03-02 14:51:11 +08:00 committed by wenlingz
parent afe6a67a9f
commit b4a23e6c13
4 changed files with 20 additions and 8 deletions

@@ -113,6 +113,7 @@ int32_t ept_violation_vmexit_handler(struct acrn_vcpu *vcpu)
     /*caused by instruction fetch */
     if ((exit_qual & 0x4UL) != 0UL) {
+        /* TODO: check whether the gpa is not a MMIO address. */
         if (vcpu->arch.cur_context == NORMAL_WORLD) {
             ept_modify_mr(vcpu->vm, (uint64_t *)vcpu->vm->arch_vm.nworld_eptp,
                     gpa & PAGE_MASK, PAGE_SIZE, EPT_EXE, 0UL);

@@ -88,7 +88,7 @@ void free_page(struct page_pool *pool, struct page *page)
 }
 
 /* @pre: The PPT and EPT have same page granularity */
-static inline bool large_page_support(enum _page_table_level level)
+static inline bool large_page_support(enum _page_table_level level, __unused uint64_t prot)
 {
     bool support;
@@ -199,9 +199,20 @@ void *get_reserve_sworld_memory_base(void)
     return post_uos_sworld_memory;
 }
 
-static inline bool large_page_not_support(__unused enum _page_table_level level)
+/*
+ * Pages without execution right, such as MMIO, can always use large pages
+ * based on hardware capability, even if the VM is an RTVM. This can save
+ * page table pages and improve the TLB hit rate.
+ */
+static inline bool use_large_page(enum _page_table_level level, uint64_t prot)
 {
-    return false;
+    bool ret = false;    /* for code page */
+
+    if ((prot & EPT_EXE) == 0UL) {
+        ret = large_page_support(level, prot);
+    }
+
+    return ret;
 }
 
 static inline uint64_t ept_get_default_access_right(void)
@@ -263,9 +274,9 @@ void init_ept_mem_ops(struct memory_ops *mem_ops, uint16_t vm_id)
     if (is_ept_force_4k_ipage()) {
         mem_ops->tweak_exe_right = ept_tweak_exe_right;
         mem_ops->recover_exe_right = ept_recover_exe_right;
-        /* For RTVM, build 4KB page mapping in EPT */
+        /* For RTVM, build 4KB page mapping in EPT for code pages */
         if (is_rt_vm(vm)) {
-            mem_ops->large_page_support = large_page_not_support;
+            mem_ops->large_page_support = use_large_page;
         }
     } else {
         mem_ops->tweak_exe_right = nop_tweak_exe_right;

@@ -321,7 +321,7 @@ static void add_pde(const uint64_t *pdpte, uint64_t paddr_start, uint64_t vaddr_
         pr_fatal("%s, pde 0x%lx is already present!\n", __func__, vaddr);
     } else {
         if (mem_ops->pgentry_present(*pde) == 0UL) {
-            if (mem_ops->large_page_support(IA32E_PD) &&
+            if (mem_ops->large_page_support(IA32E_PD, prot) &&
                 mem_aligned_check(paddr, PDE_SIZE) &&
                 mem_aligned_check(vaddr, PDE_SIZE) &&
                 (vaddr_next <= vaddr_end)) {
@@ -370,7 +370,7 @@ static void add_pdpte(const uint64_t *pml4e, uint64_t paddr_start, uint64_t vadd
         pr_fatal("%s, pdpte 0x%lx is already present!\n", __func__, vaddr);
     } else {
         if (mem_ops->pgentry_present(*pdpte) == 0UL) {
-            if (mem_ops->large_page_support(IA32E_PDPT) &&
+            if (mem_ops->large_page_support(IA32E_PDPT, prot) &&
                 mem_aligned_check(paddr, PDPTE_SIZE) &&
                 mem_aligned_check(vaddr, PDPTE_SIZE) &&
                 (vaddr_next <= vaddr_end)) {

@@ -68,7 +68,7 @@ struct page_pool {
 struct memory_ops {
     struct page_pool *pool;
-    bool (*large_page_support)(enum _page_table_level level);
+    bool (*large_page_support)(enum _page_table_level level, uint64_t prot);
     uint64_t (*get_default_access_right)(void);
     uint64_t (*pgentry_present)(uint64_t pte);
     void (*clflush_pagewalk)(const void *p);