hv: ept: apply MCE on page size change mitigation conditionally

Only apply the software workaround on the processor models that might be
affected by MCE on page size change. For models known to be immune to
the issue, the mitigation is turned off.

Atom processors are not affected by the issue.
Also check CPUID and the IA32_ARCH_CAPABILITIES MSR to determine whether
the model is immune to the issue: the CPU is not vulnerable when both
CPUID.(EAX=07H,ECX=0H).EDX[29] and IA32_ARCH_CAPABILITIES[IF_PSCHANGE_MC_NO]
are 1.

In other cases not listed above, the CPU may be vulnerable.

Tracked-On: #4121
Signed-off-by: Binbin Wu <binbin.wu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Binbin Wu 2019-11-08 09:30:06 +08:00 committed by wenlingz
parent 3834f6ac96
commit 241a811f42
4 changed files with 60 additions and 6 deletions
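
For readers outside the ACRN tree, the immunity check described in the
commit message can be summarized as a small standalone routine. The sketch
below is illustrative, not ACRN code (the diff below uses ACRN's own
get_pcpu_info()/pcpu_has_cap()/msr_read() helpers): rdmsr is privileged,
so this only works at ring 0, and __get_cpuid_count() is the GCC/Clang
builtin wrapper from <cpuid.h>.

#include <stdbool.h>
#include <stdint.h>
#include <cpuid.h>

#define MSR_ARCH_CAPABILITIES	0x10AU
#define IF_PSCHANGE_MC_NO	(1UL << 6U)

/* Privileged read of a 64-bit MSR (ring 0 only). */
static uint64_t rdmsr(uint32_t msr)
{
	uint32_t lo, hi;

	__asm__ volatile ("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
	return (((uint64_t)hi << 32U) | (uint64_t)lo);
}

/* The CPU is immune to "MCE on Page Size Change" when both
 * CPUID.(EAX=07H,ECX=0H).EDX[29] (IA32_ARCH_CAPABILITIES supported)
 * and IA32_ARCH_CAPABILITIES[6] (IF_PSCHANGE_MC_NO) are 1. */
static bool pschange_mc_immune(void)
{
	uint32_t eax, ebx, ecx, edx;

	if (__get_cpuid_count(7U, 0U, &eax, &ebx, &ecx, &edx) == 0) {
		return false;	/* CPUID leaf 07H not available */
	}
	if ((edx & (1U << 29U)) == 0U) {
		return false;	/* no IA32_ARCH_CAPABILITIES MSR */
	}
	return ((rdmsr(MSR_ARCH_CAPABILITIES) & IF_PSCHANGE_MC_NO) != 0UL);
}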

@@ -11,6 +11,7 @@
 #include <trusty.h>
 #include <vtd.h>
 #include <vm_configurations.h>
+#include <security.h>
 
 static struct page ppt_pml4_pages[PML4_PAGE_NUM(CONFIG_PLATFORM_RAM_SIZE + PLATFORM_LO_MMIO_SIZE)];
 static struct page ppt_pdpt_pages[PDPT_PAGE_NUM(CONFIG_PLATFORM_RAM_SIZE + PLATFORM_LO_MMIO_SIZE)];
@@ -60,8 +61,8 @@ static inline struct page *ppt_get_pd_page(const union pgtable_pages_info *info,
 	return pd_page;
 }
 
-static inline void ppt_tweak_exe_right(uint64_t *entry __attribute__((unused))) {}
-static inline void ppt_recover_exe_right(uint64_t *entry __attribute__((unused))) {}
+static inline void nop_tweak_exe_right(uint64_t *entry __attribute__((unused))) {}
+static inline void nop_recover_exe_right(uint64_t *entry __attribute__((unused))) {}
 
 const struct memory_ops ppt_mem_ops = {
 	.info = &ppt_pages_info,
@@ -71,8 +72,8 @@ const struct memory_ops ppt_mem_ops = {
 	.get_pdpt_page = ppt_get_pdpt_page,
 	.get_pd_page = ppt_get_pd_page,
 	.clflush_pagewalk = ppt_clflush_pagewalk,
-	.tweak_exe_right = ppt_tweak_exe_right,
-	.recover_exe_right = ppt_recover_exe_right,
+	.tweak_exe_right = nop_tweak_exe_right,
+	.recover_exe_right = nop_recover_exe_right,
 };
 
 static struct page sos_vm_pml4_pages[PML4_PAGE_NUM(EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE))];
@@ -204,7 +205,13 @@ void init_ept_mem_ops(struct memory_ops *mem_ops, uint16_t vm_id)
 	mem_ops->get_pd_page = ept_get_pd_page;
 	mem_ops->get_pt_page = ept_get_pt_page;
 	mem_ops->clflush_pagewalk = ept_clflush_pagewalk;
+
 	/* Mitigation for issue "Machine Check Error on Page Size Change" */
-	mem_ops->tweak_exe_right = ept_tweak_exe_right;
-	mem_ops->recover_exe_right = ept_recover_exe_right;
+	if (is_ept_force_4k_ipage()) {
+		mem_ops->tweak_exe_right = ept_tweak_exe_right;
+		mem_ops->recover_exe_right = ept_recover_exe_right;
+	} else {
+		mem_ops->tweak_exe_right = nop_tweak_exe_right;
+		mem_ops->recover_exe_right = nop_recover_exe_right;
+	}
 }
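
For context, the ept_tweak_exe_right()/ept_recover_exe_right() callbacks
selected above come from the parent mitigation commit (3834f6ac96): they
drop and later restore the execute permission of an EPT paging-structure
entry around a page size change, so no translation is ever cached as
executable while its page size is being changed. A minimal sketch of that
pair, assuming EPT_EXE names the execute-access bit (bit 2) of an EPT
entry; the actual definitions live in the parent commit, not in this diff.

#define EPT_EXE		(1UL << 2U)	/* execute-access bit in an EPT entry */

/* Clear the execute right before the page size changes ... */
static inline void ept_tweak_exe_right(uint64_t *entry)
{
	*entry &= ~EPT_EXE;
}

/* ... and restore it once the stale translation has been flushed. */
static inline void ept_recover_exe_right(uint64_t *entry)
{
	*entry |= EPT_EXE;
}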

@@ -184,3 +184,48 @@ void set_fs_base(void)
 	msr_write(MSR_IA32_FS_BASE, (uint64_t)psc);
 }
 #endif
+
+bool is_ept_force_4k_ipage(void)
+{
+	bool force_4k_ipage = true;
+	const struct cpuinfo_x86 *info = get_pcpu_info();
+	uint64_t x86_arch_capabilities;
+
+	if (info->family == 0x6U) {
+		switch (info->model) {
+		case 0x26U:
+		case 0x27U:
+		case 0x35U:
+		case 0x36U:
+		case 0x37U:
+		case 0x86U:
+		case 0x1CU:
+		case 0x4AU:
+		case 0x4CU:
+		case 0x4DU:
+		case 0x5AU:
+		case 0x5CU:
+		case 0x5DU:
+		case 0x5FU:
+		case 0x6EU:
+		case 0x7AU:
+			/* Atom processor is not affected by the issue
+			 * "Machine Check Error on Page Size Change"
+			 */
+			force_4k_ipage = false;
+			break;
+		default:
+			force_4k_ipage = true;
+			break;
+		}
+	}
+
+	if (pcpu_has_cap(X86_FEATURE_ARCH_CAP)) {
+		x86_arch_capabilities = msr_read(MSR_IA32_ARCH_CAPABILITIES);
+		if ((x86_arch_capabilities & IA32_ARCH_CAP_IF_PSCHANGE_MC_NO) != 0UL) {
+			force_4k_ipage = false;
+		}
+	}
+
+	return force_4k_ipage;
+}
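
Two notes on how this maps to the commit message. First,
pcpu_has_cap(X86_FEATURE_ARCH_CAP) is the cached equivalent of
CPUID.(EAX=07H,ECX=0H).EDX[29], so together with the MSR test it
implements exactly the two-bit immunity condition. Second, the numbers
in the switch are display models, which for family 06H combine CPUID's
model and extended-model fields; a hedged sketch of that decoding (the
helper name is illustrative, ACRN caches these values in cpuinfo_x86):

#include <stdint.h>

/* Decode display family/model from CPUID.(EAX=01H).EAX per the SDM:
 * the extended-model field (bits 19:16) only applies when the base
 * family is 06H or 0FH, which is why Atom models such as 0x7A need it. */
static void decode_family_model(uint32_t eax1, uint32_t *family, uint32_t *model)
{
	uint32_t base_family = (eax1 >> 8U) & 0xFU;
	uint32_t base_model = (eax1 >> 4U) & 0xFU;
	uint32_t ext_model = (eax1 >> 16U) & 0xFU;
	uint32_t ext_family = (eax1 >> 20U) & 0xFFU;

	*family = (base_family == 0xFU) ? (base_family + ext_family) : base_family;
	*model = ((base_family == 0x6U) || (base_family == 0xFU))
			? ((ext_model << 4U) | base_model) : base_model;
}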

@@ -639,6 +639,7 @@ void update_msr_bitmap_x2apic_passthru(struct acrn_vcpu *vcpu);
 #define IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY (1U << 3U)
 #define IA32_ARCH_CAP_SSB_NO (1U << 4U)
 #define IA32_ARCH_CAP_MDS_NO (1U << 5U)
+#define IA32_ARCH_CAP_IF_PSCHANGE_MC_NO (1U << 6U)
 
 /* Flush L1 D-cache */
 #define IA32_L1D_FLUSH (1UL << 0U)

@@ -21,6 +21,7 @@ int32_t get_ibrs_type(void);
 void cpu_l1d_flush(void);
 bool check_cpu_security_cap(void);
 void cpu_internal_buffers_clear(void);
+bool is_ept_force_4k_ipage(void);
 
 #ifdef STACK_PROTECTOR
 struct stack_canary {