diff --git a/hypervisor/Makefile b/hypervisor/Makefile
index 568cbca0d..31498ff0f 100644
--- a/hypervisor/Makefile
+++ b/hypervisor/Makefile
@@ -137,6 +137,7 @@ C_SRCS += arch/x86/ioapic.c
 C_SRCS += arch/x86/lapic.c
 C_SRCS += arch/x86/cpu.c
 C_SRCS += arch/x86/cpu_caps.c
+C_SRCS += arch/x86/security.c
 C_SRCS += arch/x86/mmu.c
 C_SRCS += arch/x86/e820.c
 C_SRCS += arch/x86/pagetable.c
diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c
index a563a4ed9..1f9b51927 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -11,6 +11,7 @@
 #include 
 #include 
 #include 
+#include <security.h>
 
 struct per_cpu_region per_cpu_data[CONFIG_MAX_PCPU_NUM] __aligned(PAGE_SIZE);
 uint16_t phys_cpu_num = 0U;
@@ -67,29 +68,6 @@ static void cpu_set_current_state(uint16_t pcpu_id, enum pcpu_boot_state state)
 	per_cpu(boot_state, pcpu_id) = state;
 }
 
-#ifdef STACK_PROTECTOR
-static uint64_t get_random_value(void)
-{
-	uint64_t random = 0UL;
-
-	asm volatile ("1: rdrand %%rax\n"
-			"jnc 1b\n"
-			"mov %%rax, %0\n"
-			: "=r"(random)
-			:
-			:"%rax");
-	return random;
-}
-
-static void set_fs_base(void)
-{
-	struct stack_canary *psc = &get_cpu_var(stk_canary);
-
-	psc->canary = get_random_value();
-	msr_write(MSR_IA32_FS_BASE, (uint64_t)psc);
-}
-#endif
-
 void init_cpu_pre(uint16_t pcpu_id)
 {
 	if (pcpu_id == BOOT_CPU_ID) {
diff --git a/hypervisor/arch/x86/cpu_caps.c b/hypervisor/arch/x86/cpu_caps.c
index 2ff0921cd..7a494520f 100644
--- a/hypervisor/arch/x86/cpu_caps.c
+++ b/hypervisor/arch/x86/cpu_caps.c
@@ -36,10 +36,6 @@ struct cpu_capability {
 
 struct cpuinfo_x86 boot_cpu_data;
 
-static bool skip_l1dfl_vmentry;
-static uint64_t x86_arch_capabilities;
-int32_t ibrs_type;
-
 bool cpu_has_cap(uint32_t bit)
 {
 	uint32_t feat_idx = bit >> 5U;
@@ -252,30 +248,6 @@ void init_cpu_capabilities(void)
 	}
 
 	detect_cpu_cap();
-
-	/* For speculation defence.
-	 * The default way is to set IBRS at vmexit and then do IBPB at vcpu
-	 * context switch(ibrs_type == IBRS_RAW).
-	 * Now provide an optimized way (ibrs_type == IBRS_OPT) which set
-	 * STIBP and do IBPB at vmexit,since having STIBP always set has less
-	 * impact than having IBRS always set. Also since IBPB is already done
-	 * at vmexit, it is no necessary to do so at vcpu context switch then.
-	 */
-	ibrs_type = IBRS_NONE;
-
-	/* Currently for APL, if we enabled retpoline, then IBRS should not
-	 * take effect
-	 * TODO: add IA32_ARCH_CAPABILITIES[1] check, if this bit is set, IBRS
-	 * should be set all the time instead of relying on retpoline
-	 */
-#ifndef CONFIG_RETPOLINE
-	if (cpu_has_cap(X86_FEATURE_IBRS_IBPB)) {
-		ibrs_type = IBRS_RAW;
-		if (cpu_has_cap(X86_FEATURE_STIBP)) {
-			ibrs_type = IBRS_OPT;
-		}
-	}
-#endif
 }
 
 static bool is_ept_supported(void)
@@ -334,43 +306,6 @@ void init_cpu_model_name(void)
 	boot_cpu_data.model_name[48] = '\0';
 }
 
-bool check_cpu_security_cap(void)
-{
-	if (cpu_has_cap(X86_FEATURE_ARCH_CAP)) {
-		x86_arch_capabilities = msr_read(MSR_IA32_ARCH_CAPABILITIES);
-		skip_l1dfl_vmentry = ((x86_arch_capabilities
-			& IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) != 0UL);
-	} else {
-		return false;
-	}
-
-	if ((!cpu_has_cap(X86_FEATURE_L1D_FLUSH)) && (!skip_l1dfl_vmentry)) {
-		return false;
-	}
-
-	if (!cpu_has_cap(X86_FEATURE_IBRS_IBPB) &&
-		!cpu_has_cap(X86_FEATURE_STIBP)) {
-		return false;
-	}
-
-	return true;
-}
-
-void cpu_l1d_flush(void)
-{
-	/*
-	 * 'skip_l1dfl_vmentry' will be true on platform that
-	 * is not affected by L1TF.
-	 *
-	 */
-	if (!skip_l1dfl_vmentry) {
-		if (cpu_has_cap(X86_FEATURE_L1D_FLUSH)) {
-			msr_write(MSR_IA32_FLUSH_CMD, IA32_L1D_FLUSH);
-		}
-	}
-
-}
-
 static inline bool is_vmx_disabled(void)
 {
 	uint64_t msr_val;
diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c
index eb1daf198..6aa95b580 100644
--- a/hypervisor/arch/x86/guest/vcpu.c
+++ b/hypervisor/arch/x86/guest/vcpu.c
@@ -7,6 +7,7 @@
 #include 
 #include 
 #include 
+#include <security.h>
 
 vm_sw_loader_t vm_sw_loader;
 
diff --git a/hypervisor/arch/x86/security.c b/hypervisor/arch/x86/security.c
new file mode 100644
index 000000000..048920596
--- /dev/null
+++ b/hypervisor/arch/x86/security.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2018 Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+static bool skip_l1dfl_vmentry;
+static uint64_t x86_arch_capabilities;
+int32_t ibrs_type;
+
+static void detect_ibrs(void)
+{
+	/* For speculation defence.
+	 * The default way is to set IBRS at vmexit and then do IBPB at vcpu
+	 * context switch(ibrs_type == IBRS_RAW).
+	 * Now provide an optimized way (ibrs_type == IBRS_OPT) which set
+	 * STIBP and do IBPB at vmexit,since having STIBP always set has less
+	 * impact than having IBRS always set. Also since IBPB is already done
+	 * at vmexit, it is no necessary to do so at vcpu context switch then.
+	 */
+	ibrs_type = IBRS_NONE;
+
+	/* Currently for APL, if we enabled retpoline, then IBRS should not
+	 * take effect
+	 * TODO: add IA32_ARCH_CAPABILITIES[1] check, if this bit is set, IBRS
+	 * should be set all the time instead of relying on retpoline
+	 */
+#ifndef CONFIG_RETPOLINE
+	if (cpu_has_cap(X86_FEATURE_IBRS_IBPB)) {
+		ibrs_type = IBRS_RAW;
+		if (cpu_has_cap(X86_FEATURE_STIBP)) {
+			ibrs_type = IBRS_OPT;
+		}
+	}
+#endif
+}
+
+bool check_cpu_security_cap(void)
+{
+	detect_ibrs();
+
+	if (cpu_has_cap(X86_FEATURE_ARCH_CAP)) {
+		x86_arch_capabilities = msr_read(MSR_IA32_ARCH_CAPABILITIES);
+		skip_l1dfl_vmentry = ((x86_arch_capabilities
+			& IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) != 0UL);
+	} else {
+		return false;
+	}
+
+	if ((!cpu_has_cap(X86_FEATURE_L1D_FLUSH)) && (!skip_l1dfl_vmentry)) {
+		return false;
+	}
+
+	if (!cpu_has_cap(X86_FEATURE_IBRS_IBPB) &&
+		!cpu_has_cap(X86_FEATURE_STIBP)) {
+		return false;
+	}
+
+	return true;
+}
+
+void cpu_l1d_flush(void)
+{
+	/*
+	 * 'skip_l1dfl_vmentry' will be true on platform that
+	 * is not affected by L1TF.
+	 *
+	 */
+	if (!skip_l1dfl_vmentry) {
+		if (cpu_has_cap(X86_FEATURE_L1D_FLUSH)) {
+			msr_write(MSR_IA32_FLUSH_CMD, IA32_L1D_FLUSH);
+		}
+	}
+
+}
+
+#ifdef STACK_PROTECTOR
+static uint64_t get_random_value(void)
+{
+	uint64_t random = 0UL;
+
+	asm volatile ("1: rdrand %%rax\n"
+			"jnc 1b\n"
+			"mov %%rax, %0\n"
+			: "=r"(random)
+			:
+			:"%rax");
+	return random;
+}
+
+void set_fs_base(void)
+{
+	struct stack_canary *psc = &get_cpu_var(stk_canary);
+
+	psc->canary = get_random_value();
+	msr_write(MSR_IA32_FS_BASE, (uint64_t)psc);
+}
+#endif
diff --git a/hypervisor/arch/x86/trusty.c b/hypervisor/arch/x86/trusty.c
index 58d953c74..13d3eb9f6 100644
--- a/hypervisor/arch/x86/trusty.c
+++ b/hypervisor/arch/x86/trusty.c
@@ -6,6 +6,7 @@
 
 #include 
 #include 
+#include <security.h>
 
 #define ACRN_DBG_TRUSTY 6U
 
diff --git a/hypervisor/arch/x86/vmx_asm.S b/hypervisor/arch/x86/vmx_asm.S
index 460b7b8fc..2cba0b73b 100644
--- a/hypervisor/arch/x86/vmx_asm.S
+++ b/hypervisor/arch/x86/vmx_asm.S
@@ -9,7 +9,7 @@
 #include 
 #include 
 #include 
-#include 
+#include <security.h>
 #include 
 
 /* NOTE:
diff --git a/hypervisor/include/arch/x86/cpu.h b/hypervisor/include/arch/x86/cpu.h
index 824732a07..86211e83a 100644
--- a/hypervisor/include/arch/x86/cpu.h
+++ b/hypervisor/include/arch/x86/cpu.h
@@ -246,15 +246,6 @@ enum pcpu_boot_state {
 	PCPU_STATE_DEAD,
 };
 
-#ifdef STACK_PROTECTOR
-struct stack_canary {
-	/* Gcc generates extra code, using [fs:40] to access canary */
-	uint8_t reserved[40];
-	uint64_t canary;
-};
-void __stack_chk_fail(void);
-#endif
-
 /* Function prototypes */
 void cpu_do_idle(void);
 void cpu_dead(void);
diff --git a/hypervisor/include/arch/x86/cpu_caps.h b/hypervisor/include/arch/x86/cpu_caps.h
index 29f162280..2d92fbb2e 100644
--- a/hypervisor/include/arch/x86/cpu_caps.h
+++ b/hypervisor/include/arch/x86/cpu_caps.h
@@ -7,19 +7,6 @@
 #ifndef CPUINFO_H
 #define CPUINFO_H
 
-/* type of speculation control
- * 0 - no speculation control support
- * 1 - raw IBRS + IPBP support
- * 2 - with STIBP optimization support
- */
-#define IBRS_NONE 0
-#define IBRS_RAW 1
-#define IBRS_OPT 2
-
-#ifndef ASSEMBLER
-
-extern int32_t ibrs_type;
-
 struct cpu_state_info {
 	uint8_t px_cnt;	/* count of all Px states */
 	const struct cpu_px_data *px_data;
@@ -72,6 +59,4 @@ bool check_cpu_security_cap(void);
 void cpu_l1d_flush(void);
 int detect_hardware_support(void);
 
-#endif /* ASSEMBLER */
-
 #endif /* CPUINFO_H */
diff --git a/hypervisor/include/arch/x86/per_cpu.h b/hypervisor/include/arch/x86/per_cpu.h
index 6abfb7a53..de0b0e4c1 100644
--- a/hypervisor/include/arch/x86/per_cpu.h
+++ b/hypervisor/include/arch/x86/per_cpu.h
@@ -18,6 +18,7 @@
 #include 
 #include "arch/x86/guest/instr_emul.h"
 #include 
+#include <security.h>
 
 struct per_cpu_region {
 	/* vmxon_region MUST be 4KB-aligned */
diff --git a/hypervisor/include/arch/x86/security.h b/hypervisor/include/arch/x86/security.h
new file mode 100644
index 000000000..bf417c86c
--- /dev/null
+++ b/hypervisor/include/arch/x86/security.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2018 Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SECURITY_H
+#define SECURITY_H
+
+/* type of speculation control
+ * 0 - no speculation control support
+ * 1 - raw IBRS + IPBP support
+ * 2 - with STIBP optimization support
+ */
+#define IBRS_NONE 0
+#define IBRS_RAW 1
+#define IBRS_OPT 2
+
+#ifndef ASSEMBLER
+extern int32_t ibrs_type;
+void cpu_l1d_flush(void);
+bool check_cpu_security_cap(void);
+
+#ifdef STACK_PROTECTOR
+struct stack_canary {
+	/* Gcc generates extra code, using [fs:40] to access canary */
+	uint8_t reserved[40];
+	uint64_t canary;
+};
+void __stack_chk_fail(void);
+void set_fs_base(void);
+#endif
+
+#endif /* ASSEMBLER */
+
+#endif /* SECURITY_H */
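
Usage note (illustrative, not part of the patch): the relocated helpers keep their original contracts, so callers only need the new security.h header. The sketch below shows how the API is typically consumed -- one capability check on the boot CPU, canary setup on every CPU, and an L1D flush on the VM-entry path. The wrapper names (bsp_security_check, pcpu_stack_protector_init, pre_vmentry_mitigations) and the include paths are assumptions for the example; only check_cpu_security_cap(), set_fs_base(), cpu_l1d_flush() and cpu_dead() appear in the tree.

#include <security.h>	/* header added by this patch; path assumed */
#include <cpu.h>	/* for cpu_dead(); path assumed */

/* Boot CPU: refuse to run guests on hardware that lacks both the L1D flush
 * interface and the IBRS/IBPB/STIBP controls verified by
 * check_cpu_security_cap(). */
static void bsp_security_check(void)		/* illustrative wrapper */
{
	if (!check_cpu_security_cap()) {
		cpu_dead();	/* park the CPU; the real policy is the caller's choice */
	}
}

/* Every CPU: point IA32_FS_BASE at this CPU's stack_canary structure so the
 * GCC stack protector finds its canary at %fs:40. */
static void pcpu_stack_protector_init(void)	/* illustrative wrapper */
{
#ifdef STACK_PROTECTOR
	set_fs_base();
#endif
}

/* VM-entry path: cpu_l1d_flush() writes IA32_FLUSH_CMD only when the platform
 * is affected by L1TF, i.e. SKIP_L1DFL_VMENTRY is not reported. */
static void pre_vmentry_mitigations(void)	/* illustrative wrapper */
{
	cpu_l1d_flush();
}

Note that detect_ibrs() now runs from check_cpu_security_cap() rather than from init_cpu_capabilities(), so the capability check must execute before the first VM entry for ibrs_type to hold a meaningful value.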