hv: mmu: release 1GB cpu side support constraint

Some platforms, such as Lakefield, TNT and EHL, still do not support 1GB
large pages on the CPU side: they have a silicon bug, and in this case the
CPU cannot use 1GB large pages.

This patch removes this constraint in order to support more hardware platforms.

Note this patch doesn't remove the constraint on the IOMMU side.

Tracked-On: #4550
Signed-off-by: Li Fei1 <fei1.li@intel.com>
This commit is contained in:
Li Fei1
2020-06-05 09:50:56 +08:00
committed by wenlingz
parent 6e57553015
commit 65e4a16e6a
5 changed files with 57 additions and 33 deletions

View File

@@ -5,6 +5,7 @@
*/
#include <types.h>
#include <rtl.h>
#include <cpufeatures.h>
#include <pgtable.h>
#include <page.h>
#include <mmu.h>
@@ -28,6 +29,22 @@ static union pgtable_pages_info ppt_pages_info = {
}
};
/*
 * Report whether a large page may be used at the given paging level.
 *
 * 2MB pages (PD level) are always usable; 1GB pages (PDPT level) are
 * usable only when the processor advertises the VMX EPT 1GB-page
 * capability. All other levels never map large pages.
 *
 * @pre: The PPT and EPT have same page granularity
 */
static inline bool large_page_support(enum _page_table_level level)
{
	bool ret;

	switch (level) {
	case IA32E_PD:
		/* 2MB pages are unconditionally supported. */
		ret = true;
		break;
	case IA32E_PDPT:
		/* 1GB pages depend on the EPT 1GB-page capability bit. */
		ret = pcpu_has_vmx_ept_cap(VMX_EPT_1GB_PAGE);
		break;
	default:
		ret = false;
		break;
	}

	return ret;
}
static inline uint64_t ppt_get_default_access_right(void)
{
return (PAGE_PRESENT | PAGE_RW | PAGE_USER);
@@ -68,7 +85,7 @@ static inline void nop_recover_exe_right(uint64_t *entry __attribute__((unused))
const struct memory_ops ppt_mem_ops = {
.info = &ppt_pages_info,
.large_page_enabled = true,
.large_page_support = large_page_support,
.get_default_access_right = ppt_get_default_access_right,
.pgentry_present = ppt_pgentry_present,
.get_pml4_page = ppt_get_pml4_page,
@@ -137,6 +154,11 @@ void *get_reserve_sworld_memory_base(void)
return post_uos_sworld_memory;
}
/*
 * Deny large pages at every paging level; installed as the
 * large_page_support hook when only 4KB mappings are allowed.
 */
static inline bool large_page_not_support(__unused enum _page_table_level level)
{
	bool ret = false;

	return ret;
}
static inline uint64_t ept_get_default_access_right(void)
{
return EPT_RWX;
@@ -255,7 +277,7 @@ void init_ept_mem_ops(struct memory_ops *mem_ops, uint16_t vm_id)
mem_ops->get_pd_page = ept_get_pd_page;
mem_ops->get_pt_page = ept_get_pt_page;
mem_ops->clflush_pagewalk = ept_clflush_pagewalk;
mem_ops->large_page_enabled = true;
mem_ops->large_page_support = large_page_support;
/* Mitigation for issue "Machine Check Error on Page Size Change" */
if (is_ept_force_4k_ipage()) {
@@ -263,7 +285,7 @@ void init_ept_mem_ops(struct memory_ops *mem_ops, uint16_t vm_id)
mem_ops->recover_exe_right = ept_recover_exe_right;
/* For RTVM, build 4KB page mapping in EPT */
if (is_rt_vm(vm)) {
mem_ops->large_page_enabled = false;
mem_ops->large_page_support = large_page_not_support;
}
} else {
mem_ops->tweak_exe_right = nop_tweak_exe_right;