From 192859ee02969e8589702b419a8ddf9ed75fd288 Mon Sep 17 00:00:00 2001
From: Binbin Wu
Date: Fri, 8 Nov 2019 09:30:06 +0800
Subject: [PATCH] hv: ept: apply MCE on page size change mitigation
 conditionally

Only apply the software workaround on the models that might be affected
by MCE on page size change. For models that are known to be immune to
the issue, the mitigation is turned off.

Atom processors are not affected by the issue.
CPUID and the IA32_ARCH_CAPABILITIES MSR are also consulted to determine
whether the model is immune: the CPU is not vulnerable when both
CPUID.(EAX=07H,ECX=0H).EDX[29] and
IA32_ARCH_CAPABILITIES[IF_PSCHANGE_MC_NO] are 1.
In cases not listed above, the CPU may be vulnerable.

This patch also changes the macros for the IA32_ARCH_CAPABILITIES MSR
bits to UL instead of U, since the MSR is 64-bit.

Tracked-On: #4101
Signed-off-by: Binbin Wu
Acked-by: Eddie Dong
---
 hypervisor/arch/x86/page.c             | 19 +++++++----
 hypervisor/arch/x86/security.c         | 45 ++++++++++++++++++++++++++
 hypervisor/include/arch/x86/msr.h      | 13 ++++----
 hypervisor/include/arch/x86/security.h |  1 +
 4 files changed, 66 insertions(+), 12 deletions(-)

diff --git a/hypervisor/arch/x86/page.c b/hypervisor/arch/x86/page.c
index 9bb9def42..1328e5f2e 100644
--- a/hypervisor/arch/x86/page.c
+++ b/hypervisor/arch/x86/page.c
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include <security.h>
 
 static struct page ppt_pml4_pages[PML4_PAGE_NUM(CONFIG_PLATFORM_RAM_SIZE + PLATFORM_LO_MMIO_SIZE)];
 static struct page ppt_pdpt_pages[PDPT_PAGE_NUM(CONFIG_PLATFORM_RAM_SIZE + PLATFORM_LO_MMIO_SIZE)];
@@ -60,8 +61,8 @@ static inline struct page *ppt_get_pd_page(const union pgtable_pages_info *info,
 	return pd_page;
 }
 
-static inline void ppt_tweak_exe_right(uint64_t *entry __attribute__((unused))) {}
-static inline void ppt_recover_exe_right(uint64_t *entry __attribute__((unused))) {}
+static inline void nop_tweak_exe_right(uint64_t *entry __attribute__((unused))) {}
+static inline void nop_recover_exe_right(uint64_t *entry __attribute__((unused))) {}
 
 const struct memory_ops ppt_mem_ops = {
 	.info = &ppt_pages_info,
@@ -71,8 +72,8 @@ const struct memory_ops ppt_mem_ops = {
 	.get_pdpt_page = ppt_get_pdpt_page,
 	.get_pd_page = ppt_get_pd_page,
 	.clflush_pagewalk = ppt_clflush_pagewalk,
-	.tweak_exe_right = ppt_tweak_exe_right,
-	.recover_exe_right = ppt_recover_exe_right,
+	.tweak_exe_right = nop_tweak_exe_right,
+	.recover_exe_right = nop_recover_exe_right,
 };
 
 static struct page sos_vm_pml4_pages[PML4_PAGE_NUM(EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE))];
@@ -204,7 +205,13 @@ void init_ept_mem_ops(struct memory_ops *mem_ops, uint16_t vm_id)
 	mem_ops->get_pd_page = ept_get_pd_page;
 	mem_ops->get_pt_page = ept_get_pt_page;
 	mem_ops->clflush_pagewalk = ept_clflush_pagewalk;
+	/* Mitigation for issue "Machine Check Error on Page Size Change" */
-	mem_ops->tweak_exe_right = ept_tweak_exe_right;
-	mem_ops->recover_exe_right = ept_recover_exe_right;
+	if (is_ept_force_4k_ipage()) {
+		mem_ops->tweak_exe_right = ept_tweak_exe_right;
+		mem_ops->recover_exe_right = ept_recover_exe_right;
+	} else {
+		mem_ops->tweak_exe_right = nop_tweak_exe_right;
+		mem_ops->recover_exe_right = nop_recover_exe_right;
+	}
 }
 
diff --git a/hypervisor/arch/x86/security.c b/hypervisor/arch/x86/security.c
index 6ad8c56e0..39fec5056 100644
--- a/hypervisor/arch/x86/security.c
+++ b/hypervisor/arch/x86/security.c
@@ -184,3 +184,48 @@ void set_fs_base(void)
 	msr_write(MSR_IA32_FS_BASE, (uint64_t)psc);
 }
 #endif
+
+bool is_ept_force_4k_ipage(void)
+{
+	bool force_4k_ipage = true;
+	const struct cpuinfo_x86 *info = get_pcpu_info();
+	uint64_t x86_arch_capabilities;
+
+	if (info->family == 0x6U) {
+		switch (info->model) {
+		case 0x26U:
+		case 0x27U:
+		case 0x35U:
+		case 0x36U:
+		case 0x37U:
+		case 0x86U:
+		case 0x1CU:
+		case 0x4AU:
+		case 0x4CU:
+		case 0x4DU:
+		case 0x5AU:
+		case 0x5CU:
+		case 0x5DU:
+		case 0x5FU:
+		case 0x6EU:
+		case 0x7AU:
+			/* Atom processor is not affected by the issue
+			 * "Machine Check Error on Page Size Change"
+			 */
+			force_4k_ipage = false;
+			break;
+		default:
+			force_4k_ipage = true;
+			break;
+		}
+	}
+
+	if (pcpu_has_cap(X86_FEATURE_ARCH_CAP)) {
+		x86_arch_capabilities = msr_read(MSR_IA32_ARCH_CAPABILITIES);
+		if ((x86_arch_capabilities & IA32_ARCH_CAP_IF_PSCHANGE_MC_NO) != 0UL) {
+			force_4k_ipage = false;
+		}
+	}
+
+	return force_4k_ipage;
+}
diff --git a/hypervisor/include/arch/x86/msr.h b/hypervisor/include/arch/x86/msr.h
index 149baa3b7..f0072b8d2 100644
--- a/hypervisor/include/arch/x86/msr.h
+++ b/hypervisor/include/arch/x86/msr.h
@@ -630,12 +630,13 @@ void update_msr_bitmap_x2apic_passthru(struct acrn_vcpu *vcpu);
 #define PRED_SET_IBPB				(1U << 0U)
 
 /* IA32 ARCH Capabilities bit */
-#define IA32_ARCH_CAP_RDCL_NO			(1U << 0U)
-#define IA32_ARCH_CAP_IBRS_ALL			(1U << 1U)
-#define IA32_ARCH_CAP_RSBA			(1U << 2U)
-#define IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY	(1U << 3U)
-#define IA32_ARCH_CAP_SSB_NO			(1U << 4U)
-#define IA32_ARCH_CAP_MDS_NO			(1U << 5U)
+#define IA32_ARCH_CAP_RDCL_NO			(1UL << 0U)
+#define IA32_ARCH_CAP_IBRS_ALL			(1UL << 1U)
+#define IA32_ARCH_CAP_RSBA			(1UL << 2U)
+#define IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY	(1UL << 3U)
+#define IA32_ARCH_CAP_SSB_NO			(1UL << 4U)
+#define IA32_ARCH_CAP_MDS_NO			(1UL << 5U)
+#define IA32_ARCH_CAP_IF_PSCHANGE_MC_NO		(1UL << 6U)
 
 /* Flush L1 D-cache */
 #define IA32_L1D_FLUSH				(1UL << 0U)
diff --git a/hypervisor/include/arch/x86/security.h b/hypervisor/include/arch/x86/security.h
index 4e93d60c7..b2a158de1 100644
--- a/hypervisor/include/arch/x86/security.h
+++ b/hypervisor/include/arch/x86/security.h
@@ -21,6 +21,7 @@ int32_t get_ibrs_type(void);
 void cpu_l1d_flush(void);
 bool check_cpu_security_cap(void);
 void cpu_internal_buffers_clear(void);
+bool is_ept_force_4k_ipage(void);
 
 #ifdef STACK_PROTECTOR
 struct stack_canary {
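
--
Two notes for reviewers; the sketches below are not part of the patch.

The detection in is_ept_force_4k_ipage() reduces to two independent
checks: a family-6 model list covering Atom parts, and the architectural
enumeration via CPUID/MSR. A minimal standalone sketch of the CPUID/MSR
half, assuming hypothetical wrappers cpuid_07_edx() and rdmsr64() around
the CPUID and RDMSR instructions:

    #include <stdbool.h>
    #include <stdint.h>

    #define CPUID_07_EDX_ARCH_CAP      (1U << 29U)  /* CPUID.(EAX=07H,ECX=0H).EDX[29] */
    #define MSR_IA32_ARCH_CAPABILITIES 0x0000010AU
    #define ARCH_CAP_IF_PSCHANGE_MC_NO (1UL << 6U)  /* IA32_ARCH_CAPABILITIES[6] */

    extern uint32_t cpuid_07_edx(void);    /* hypothetical CPUID wrapper */
    extern uint64_t rdmsr64(uint32_t msr); /* hypothetical RDMSR wrapper */

    /* Immune only when CPUID enumerates IA32_ARCH_CAPABILITIES and that
     * MSR reports IF_PSCHANGE_MC_NO = 1; anything else may be vulnerable. */
    static bool pschange_mc_immune(void)
    {
        bool immune = false;

        if ((cpuid_07_edx() & CPUID_07_EDX_ARCH_CAP) != 0U) {
            immune = ((rdmsr64(MSR_IA32_ARCH_CAPABILITIES) &
                       ARCH_CAP_IF_PSCHANGE_MC_NO) != 0UL);
        }
        return immune;
    }

The CPUID gate matters: reading IA32_ARCH_CAPABILITIES on a part that
does not enumerate it raises #GP, which is why the patch checks
pcpu_has_cap(X86_FEATURE_ARCH_CAP) before calling msr_read().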
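The tweak/recover hooks selected in init_ept_mem_ops() carry the
workaround itself: large EPT pages are mapped without execute
permission, so the CPU never caches an instruction-fetch translation for
a large page; the first guest fetch faults, the range is split into 4KB
mappings, and execute right is restored on the 4KB entries. A sketch of
what the two callbacks amount to, assuming EPT_EXE names bit 2, the
architectural execute-permission bit of an EPT entry (the macro and
function names here are illustrative, not ACRN's exact definitions):

    #include <stdint.h>

    #define EPT_EXE (1UL << 2U)  /* execute permission, EPT entry bit 2 */

    /* Withhold execute right on a large-page EPT entry so no
     * instruction-fetch translation for it is ever cached. */
    static inline void tweak_exe_right(uint64_t *entry)
    {
        *entry &= ~EPT_EXE;
    }

    /* Restore execute right once the range is remapped as 4KB pages. */
    static inline void recover_exe_right(uint64_t *entry)
    {
        *entry |= EPT_EXE;
    }

On immune parts the nop_* pair leaves large pages executable, avoiding
the extra EPT violations and 4KB splits the workaround would cost.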