hv: mmu: release 1GB CPU-side support constraint
Some platforms still do not support 1GB large pages on the CPU side. Lakefield, TNT and EHL, for example, carry a silicon bug that leaves the CPU without 1GB large-page support. This patch releases that constraint so more hardware platforms can be supported. Note that it does not release the constraint on the IOMMU side.

Tracked-On: #4550
Signed-off-by: Li Fei1 <fei1.li@intel.com>
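In short, the patch replaces the global mem_ops->large_page_enabled boolean with a per-level callback, mem_ops->large_page_support(level), so 2MB (PD-level) mappings keep working on parts that lack 1GB (PDPT-level) pages. A minimal standalone sketch of that decision, where cpu_supports_1gb_pages() is a hypothetical stand-in for pcpu_has_vmx_ept_cap(VMX_EPT_1GB_PAGE), not the hypervisor's API:

#include <stdbool.h>
#include <stdio.h>

enum page_table_level { IA32E_PML4, IA32E_PDPT, IA32E_PD, IA32E_PT };

/* Hypothetical probe; the patch uses pcpu_has_vmx_ept_cap(VMX_EPT_1GB_PAGE). */
static bool cpu_supports_1gb_pages(void)
{
    return false; /* e.g. Lakefield, TNT, EHL */
}

/* Per-level policy: 2MB pages stay mandatory, 1GB pages become optional. */
static bool large_page_support(enum page_table_level level)
{
    if (level == IA32E_PD) {
        return true;                      /* 2MB page at PD level */
    } else if (level == IA32E_PDPT) {
        return cpu_supports_1gb_pages();  /* 1GB page at PDPT level */
    } else {
        return false;                     /* PML4/PT entries never map large pages */
    }
}

int main(void)
{
    printf("2MB: %d, 1GB: %d\n", large_page_support(IA32E_PD),
           large_page_support(IA32E_PDPT));
    return 0;
}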
@@ -385,9 +385,14 @@ static int32_t check_vmx_mmu_cap(void)
         !pcpu_has_vmx_vpid_cap(VMX_VPID_INVVPID_GLOBAL_CONTEXT)) {
         printf("%s, invvpid not supported\n", __func__);
         ret = -ENODEV;
-    } else if (!pcpu_has_vmx_ept_cap(VMX_EPT_1GB_PAGE)) {
-        printf("%s, ept not support 1GB large page\n", __func__);
     } else if (!pcpu_has_vmx_ept_cap(VMX_EPT_2MB_PAGE)) {
         printf("%s, ept not support 2MB large page\n", __func__);
         ret = -ENODEV;
+    } else if (pcpu_has_vmx_ept_cap(VMX_EPT_1GB_PAGE) !=
+            pcpu_has_cap(X86_FEATURE_PAGE1GB)) {
+        /* This just for simple large_page_support in arch/x86/page.c */
+        ret = -ENODEV;
+        printf("%s ept support 1GB large page while mmu is not or opposite\n", __func__);
+    } else {
+        /* No other state currently, do nothing */
     }
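The new != gate above rejects platforms where EPT and the host MMU disagree about 1GB-page support, so the single large_page_support() helper added in arch/x86/page.c can serve both the host page tables (PPT) and EPT. A minimal sketch of the gate, with ept_1gb/mmu_1gb as illustrative stand-ins for the two capability probes:

#include <stdbool.h>

#define ENODEV 19

/* Stand-ins for pcpu_has_vmx_ept_cap(VMX_EPT_1GB_PAGE) and
 * pcpu_has_cap(X86_FEATURE_PAGE1GB); illustrative only. */
static bool ept_1gb;
static bool mmu_1gb;

/* Both-on and both-off pass; a mismatch fails, because one shared
 * per-level predicate must be valid for PPT and EPT alike. */
static int check_1gb_consistency(void)
{
    return (ept_1gb != mmu_1gb) ? -ENODEV : 0;
}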
@@ -439,9 +444,6 @@ int32_t detect_hardware_support(void)
     } else if (!pcpu_has_cap(X86_FEATURE_CLFLUSHOPT)) {
         printf("%s, CLFLUSHOPT not supported\n", __func__);
         ret = -ENODEV;
-    } else if (!pcpu_has_cap(X86_FEATURE_PAGE1GB)) {
-        printf("%s, not support 1GB page\n", __func__);
-        ret = -ENODEV;
     } else if (!pcpu_has_cap(X86_FEATURE_VMX)) {
         printf("%s, vmx not supported\n", __func__);
         ret = -ENODEV;
@@ -5,6 +5,7 @@
  */
 #include <types.h>
 #include <rtl.h>
+#include <cpufeatures.h>
 #include <pgtable.h>
 #include <page.h>
 #include <mmu.h>
@@ -28,6 +29,22 @@ static union pgtable_pages_info ppt_pages_info = {
     }
 };

+/* @pre: The PPT and EPT have same page granularity */
+static inline bool large_page_support(enum _page_table_level level)
+{
+    bool support;
+
+    if (level == IA32E_PD) {
+        support = true;
+    } else if (level == IA32E_PDPT) {
+        support = pcpu_has_vmx_ept_cap(VMX_EPT_1GB_PAGE);
+    } else {
+        support = false;
+    }
+
+    return support;
+}
+
 static inline uint64_t ppt_get_default_access_right(void)
 {
     return (PAGE_PRESENT | PAGE_RW | PAGE_USER);
@@ -68,7 +85,7 @@ static inline void nop_recover_exe_right(uint64_t *entry __attribute__((unused))

 const struct memory_ops ppt_mem_ops = {
     .info = &ppt_pages_info,
-    .large_page_enabled = true,
+    .large_page_support = large_page_support,
     .get_default_access_right = ppt_get_default_access_right,
     .pgentry_present = ppt_pgentry_present,
     .get_pml4_page = ppt_get_pml4_page,
@@ -137,6 +154,11 @@ void *get_reserve_sworld_memory_base(void)
     return post_uos_sworld_memory;
 }

+static inline bool large_page_not_support(__unused enum _page_table_level level)
+{
+    return false;
+}
+
 static inline uint64_t ept_get_default_access_right(void)
 {
     return EPT_RWX;
@@ -255,7 +277,7 @@ void init_ept_mem_ops(struct memory_ops *mem_ops, uint16_t vm_id)
     mem_ops->get_pd_page = ept_get_pd_page;
     mem_ops->get_pt_page = ept_get_pt_page;
     mem_ops->clflush_pagewalk = ept_clflush_pagewalk;
-    mem_ops->large_page_enabled = true;
+    mem_ops->large_page_support = large_page_support;

     /* Mitigation for issue "Machine Check Error on Page Size Change" */
     if (is_ept_force_4k_ipage()) {
@@ -263,7 +285,7 @@ void init_ept_mem_ops(struct memory_ops *mem_ops, uint16_t vm_id)
         mem_ops->recover_exe_right = ept_recover_exe_right;
         /* For RTVM, build 4KB page mapping in EPT */
         if (is_rt_vm(vm)) {
-            mem_ops->large_page_enabled = false;
+            mem_ops->large_page_support = large_page_not_support;
         }
     } else {
         mem_ops->tweak_exe_right = nop_tweak_exe_right;
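As the hunk above shows, RT VMs get large_page_not_support(), which refuses large pages at every level so the EPT walk always ends in 4KB PTEs (the mitigation for the "Machine Check Error on Page Size Change" issue). A self-contained sketch of that selection; the type and helper names here are illustrative, not the hypervisor's own:

#include <stdbool.h>

enum page_table_level { IA32E_PML4, IA32E_PDPT, IA32E_PD, IA32E_PT };

struct memory_ops_sketch {
    bool (*large_page_support)(enum page_table_level level);
};

static bool allow_large_pages(enum page_table_level level)
{
    return (level == IA32E_PD) || (level == IA32E_PDPT); /* 2MB and 1GB */
}

static bool large_page_not_support(enum page_table_level level)
{
    (void)level;
    return false; /* never map 2MB/1GB: every walk ends in 4KB PTEs */
}

/* Hypothetical mirror of init_ept_mem_ops(): RT VMs get the always-false
 * predicate as the page-size-change mitigation, other VMs the
 * capability-based one. */
static void pick_large_page_policy(struct memory_ops_sketch *ops, bool is_rt)
{
    ops->large_page_support = is_rt ? large_page_not_support : allow_large_pages;
}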
@@ -296,7 +296,7 @@ static void add_pde(const uint64_t *pdpte, uint64_t paddr_start, uint64_t vaddr_
         pr_fatal("%s, pde 0x%lx is already present!\n", __func__, vaddr);
     } else {
         if (mem_ops->pgentry_present(*pde) == 0UL) {
-            if (mem_ops->large_page_enabled &&
+            if (mem_ops->large_page_support(IA32E_PD) &&
                 mem_aligned_check(paddr, PDE_SIZE) &&
                 mem_aligned_check(vaddr, PDE_SIZE) &&
                 (vaddr_next <= vaddr_end)) {
@@ -344,7 +344,7 @@ static void add_pdpte(const uint64_t *pml4e, uint64_t paddr_start, uint64_t vadd
         pr_fatal("%s, pdpte 0x%lx is already present!\n", __func__, vaddr);
     } else {
         if (mem_ops->pgentry_present(*pdpte) == 0UL) {
-            if (mem_ops->large_page_enabled &&
+            if (mem_ops->large_page_support(IA32E_PDPT) &&
                 mem_aligned_check(paddr, PDPTE_SIZE) &&
                 mem_aligned_check(vaddr, PDPTE_SIZE) &&
                 (vaddr_next <= vaddr_end)) {
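Taken together, add_pde() and add_pdpte() now consult the callback per level and fall back to the next page-table level whenever any condition fails. A compact sketch of that combined test, with mem_aligned() as an illustrative stand-in for mem_aligned_check():

#include <stdbool.h>
#include <stdint.h>

static bool mem_aligned(uint64_t addr, uint64_t size)
{
    return (addr & (size - 1UL)) == 0UL;
}

/* True when a large page of page_size can be installed at this level;
 * otherwise the caller allocates the next-level table and recurses. */
static bool can_map_large(bool level_supported, uint64_t paddr, uint64_t vaddr,
                          uint64_t vaddr_next, uint64_t vaddr_end,
                          uint64_t page_size)
{
    return level_supported && mem_aligned(paddr, page_size) &&
           mem_aligned(vaddr, page_size) && (vaddr_next <= vaddr_end);
}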