Mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2025-09-16 06:19:24 +00:00)
move security related funcs into security.c
There are still some security-related functions in cpu_caps.c and cpu.c; move them out into security.c.

Changes to be committed:
  modified:   Makefile
  modified:   arch/x86/cpu.c
  modified:   arch/x86/cpu_caps.c
  modified:   arch/x86/guest/vcpu.c
  new file:   arch/x86/security.c
  modified:   arch/x86/trusty.c
  modified:   arch/x86/vmx_asm.S
  modified:   include/arch/x86/cpu.h
  modified:   include/arch/x86/cpu_caps.h
  modified:   include/arch/x86/per_cpu.h
  new file:   include/arch/x86/security.h

Tracked-On: #1842
Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
hypervisor/arch/x86/cpu.c

@@ -11,6 +11,7 @@
 #include <trampoline.h>
 #include <e820.h>
 #include <cpu_caps.h>
+#include <security.h>
 
 struct per_cpu_region per_cpu_data[CONFIG_MAX_PCPU_NUM] __aligned(PAGE_SIZE);
 uint16_t phys_cpu_num = 0U;
@@ -67,29 +68,6 @@ static void cpu_set_current_state(uint16_t pcpu_id, enum pcpu_boot_state state)
         per_cpu(boot_state, pcpu_id) = state;
 }
 
-#ifdef STACK_PROTECTOR
-static uint64_t get_random_value(void)
-{
-        uint64_t random = 0UL;
-
-        asm volatile ("1: rdrand %%rax\n"
-                        "jnc 1b\n"
-                        "mov %%rax, %0\n"
-                        : "=r"(random)
-                        :
-                        :"%rax");
-        return random;
-}
-
-static void set_fs_base(void)
-{
-        struct stack_canary *psc = &get_cpu_var(stk_canary);
-
-        psc->canary = get_random_value();
-        msr_write(MSR_IA32_FS_BASE, (uint64_t)psc);
-}
-#endif
-
 void init_cpu_pre(uint16_t pcpu_id)
 {
         if (pcpu_id == BOOT_CPU_ID) {
hypervisor/arch/x86/cpu_caps.c

@@ -36,10 +36,6 @@ struct cpu_capability {
 
 struct cpuinfo_x86 boot_cpu_data;
 
-static bool skip_l1dfl_vmentry;
-static uint64_t x86_arch_capabilities;
-int32_t ibrs_type;
-
 bool cpu_has_cap(uint32_t bit)
 {
         uint32_t feat_idx = bit >> 5U;
@@ -252,30 +248,6 @@ void init_cpu_capabilities(void)
         }
 
         detect_cpu_cap();
-
-        /* For speculation defence.
-         * The default way is to set IBRS at vmexit and then do IBPB at vcpu
-         * context switch(ibrs_type == IBRS_RAW).
-         * Now provide an optimized way (ibrs_type == IBRS_OPT) which set
-         * STIBP and do IBPB at vmexit,since having STIBP always set has less
-         * impact than having IBRS always set. Also since IBPB is already done
-         * at vmexit, it is no necessary to do so at vcpu context switch then.
-         */
-        ibrs_type = IBRS_NONE;
-
-        /* Currently for APL, if we enabled retpoline, then IBRS should not
-         * take effect
-         * TODO: add IA32_ARCH_CAPABILITIES[1] check, if this bit is set, IBRS
-         * should be set all the time instead of relying on retpoline
-         */
-#ifndef CONFIG_RETPOLINE
-        if (cpu_has_cap(X86_FEATURE_IBRS_IBPB)) {
-                ibrs_type = IBRS_RAW;
-                if (cpu_has_cap(X86_FEATURE_STIBP)) {
-                        ibrs_type = IBRS_OPT;
-                }
-        }
-#endif
 }
 
 static bool is_ept_supported(void)
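For context on the policy moved by this hunk: IBRS_RAW keeps IBRS set while the host runs and issues an IBPB at every vCPU context switch, while IBRS_OPT keeps only STIBP set and issues the IBPB once at vmexit. A minimal sketch of how ibrs_type could drive the architectural IA32_SPEC_CTRL (0x48) and IA32_PRED_CMD (0x49) MSRs follows; the hook names and call sites are hypothetical (this is not ACRN's actual exit path), though msr_write() is the accessor used elsewhere in this patch and the MSR indices and bit positions are architectural:

#include <stdint.h>

#define MSR_IA32_SPEC_CTRL      0x48U    /* bit 0 = IBRS, bit 1 = STIBP */
#define MSR_IA32_PRED_CMD       0x49U    /* bit 0 = IBPB (write-only command) */
#define SPEC_CTRL_IBRS          (1UL << 0U)
#define SPEC_CTRL_STIBP         (1UL << 1U)
#define PRED_CMD_IBPB           (1UL << 0U)

/* Mirrors the ibrs_type values used in this patch. */
enum { IBRS_NONE, IBRS_RAW, IBRS_OPT };

extern int32_t ibrs_type;
extern void msr_write(uint32_t msr, uint64_t val);  /* host MSR accessor, as in the patch */

/* Hypothetical hook run right after a VM exit. */
static void spec_ctrl_on_vmexit(void)
{
        if (ibrs_type == IBRS_RAW) {
                /* IBRS_RAW: restrict indirect branch prediction while the host runs. */
                msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
        } else if (ibrs_type == IBRS_OPT) {
                /* IBRS_OPT: keep only STIBP set and flush predictors once now,
                 * so no IBPB is needed later at vCPU context switch. */
                msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_STIBP);
                msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
        }
}

/* Hypothetical hook run when switching between vCPUs. */
static void spec_ctrl_on_vcpu_switch(void)
{
        if (ibrs_type == IBRS_RAW) {
                msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
        }
}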
@@ -334,43 +306,6 @@ void init_cpu_model_name(void)
         boot_cpu_data.model_name[48] = '\0';
 }
 
-bool check_cpu_security_cap(void)
-{
-        if (cpu_has_cap(X86_FEATURE_ARCH_CAP)) {
-                x86_arch_capabilities = msr_read(MSR_IA32_ARCH_CAPABILITIES);
-                skip_l1dfl_vmentry = ((x86_arch_capabilities
-                        & IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) != 0UL);
-        } else {
-                return false;
-        }
-
-        if ((!cpu_has_cap(X86_FEATURE_L1D_FLUSH)) && (!skip_l1dfl_vmentry)) {
-                return false;
-        }
-
-        if (!cpu_has_cap(X86_FEATURE_IBRS_IBPB) &&
-                !cpu_has_cap(X86_FEATURE_STIBP)) {
-                return false;
-        }
-
-        return true;
-}
-
-void cpu_l1d_flush(void)
-{
-        /*
-         * 'skip_l1dfl_vmentry' will be true on platform that
-         * is not affected by L1TF.
-         *
-         */
-        if (!skip_l1dfl_vmentry) {
-                if (cpu_has_cap(X86_FEATURE_L1D_FLUSH)) {
-                        msr_write(MSR_IA32_FLUSH_CMD, IA32_L1D_FLUSH);
-                }
-        }
-
-}
-
 static inline bool is_vmx_disabled(void)
 {
         uint64_t msr_val;
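check_cpu_security_cap(), removed here and re-created in security.c below, keys off IA32_ARCH_CAPABILITIES (MSR 0x10A): when bit 3 (SKIP_L1DFL_VMENTRY) is set, the platform is not affected by L1TF and the per-VM-entry L1D flush can be skipped; otherwise support for IA32_FLUSH_CMD (MSR 0x10B) is required. A standalone sketch of that bit test, reusing the msr_read()/msr_write() accessors seen in this patch (the helper names here are illustrative, not ACRN's):

#include <stdbool.h>
#include <stdint.h>

#define MSR_IA32_ARCH_CAPABILITIES       0x10AU
#define MSR_IA32_FLUSH_CMD               0x10BU
#define IA32_ARCH_CAP_RDCL_NO            (1UL << 0U)  /* not affected by Meltdown */
#define IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY (1UL << 3U)  /* no L1D flush needed on VM entry */
#define IA32_L1D_FLUSH                   (1UL << 0U)  /* command bit: flush the L1 data cache */

extern uint64_t msr_read(uint32_t msr);              /* raw MSR accessors, as in the patch */
extern void msr_write(uint32_t msr, uint64_t val);

/* True when the platform itself is immune to L1TF, so the hypervisor
 * may legitimately skip the L1D flush on every VM entry. */
static bool platform_skips_l1d_flush(void)
{
        uint64_t caps = msr_read(MSR_IA32_ARCH_CAPABILITIES);

        return (caps & IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) != 0UL;
}

/* Flush the L1 data cache once, e.g. right before VM entry. Real code
 * would also confirm the L1D_FLUSH CPUID enumeration first, as the
 * patch does via cpu_has_cap(X86_FEATURE_L1D_FLUSH). */
static void l1d_flush_if_needed(void)
{
        if (!platform_skips_l1d_flush()) {
                msr_write(MSR_IA32_FLUSH_CMD, IA32_L1D_FLUSH);
        }
}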
hypervisor/arch/x86/guest/vcpu.c

@@ -7,6 +7,7 @@
 #include <hypervisor.h>
 #include <schedule.h>
 #include <vm0_boot.h>
+#include <security.h>
 
 vm_sw_loader_t vm_sw_loader;
 
hypervisor/arch/x86/security.c (new file)

@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2018 Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <types.h>
+#include <msr.h>
+#include <cpufeatures.h>
+#include <spinlock.h>
+#include <cpu.h>
+#include <per_cpu.h>
+#include <cpu_caps.h>
+#include <security.h>
+#include <logmsg.h>
+
+static bool skip_l1dfl_vmentry;
+static uint64_t x86_arch_capabilities;
+int32_t ibrs_type;
+
+static void detect_ibrs(void)
+{
+        /* For speculation defence.
+         * The default way is to set IBRS at vmexit and then do IBPB at vcpu
+         * context switch(ibrs_type == IBRS_RAW).
+         * Now provide an optimized way (ibrs_type == IBRS_OPT) which set
+         * STIBP and do IBPB at vmexit,since having STIBP always set has less
+         * impact than having IBRS always set. Also since IBPB is already done
+         * at vmexit, it is no necessary to do so at vcpu context switch then.
+         */
+        ibrs_type = IBRS_NONE;
+
+        /* Currently for APL, if we enabled retpoline, then IBRS should not
+         * take effect
+         * TODO: add IA32_ARCH_CAPABILITIES[1] check, if this bit is set, IBRS
+         * should be set all the time instead of relying on retpoline
+         */
+#ifndef CONFIG_RETPOLINE
+        if (cpu_has_cap(X86_FEATURE_IBRS_IBPB)) {
+                ibrs_type = IBRS_RAW;
+                if (cpu_has_cap(X86_FEATURE_STIBP)) {
+                        ibrs_type = IBRS_OPT;
+                }
+        }
+#endif
+}
+
+bool check_cpu_security_cap(void)
+{
+        detect_ibrs();
+
+        if (cpu_has_cap(X86_FEATURE_ARCH_CAP)) {
+                x86_arch_capabilities = msr_read(MSR_IA32_ARCH_CAPABILITIES);
+                skip_l1dfl_vmentry = ((x86_arch_capabilities
+                        & IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) != 0UL);
+        } else {
+                return false;
+        }
+
+        if ((!cpu_has_cap(X86_FEATURE_L1D_FLUSH)) && (!skip_l1dfl_vmentry)) {
+                return false;
+        }
+
+        if (!cpu_has_cap(X86_FEATURE_IBRS_IBPB) &&
+                !cpu_has_cap(X86_FEATURE_STIBP)) {
+                return false;
+        }
+
+        return true;
+}
+
+void cpu_l1d_flush(void)
+{
+        /*
+         * 'skip_l1dfl_vmentry' will be true on platform that
+         * is not affected by L1TF.
+         *
+         */
+        if (!skip_l1dfl_vmentry) {
+                if (cpu_has_cap(X86_FEATURE_L1D_FLUSH)) {
+                        msr_write(MSR_IA32_FLUSH_CMD, IA32_L1D_FLUSH);
+                }
+        }
+
+}
+
+#ifdef STACK_PROTECTOR
+static uint64_t get_random_value(void)
+{
+        uint64_t random = 0UL;
+
+        asm volatile ("1: rdrand %%rax\n"
+                        "jnc 1b\n"
+                        "mov %%rax, %0\n"
+                        : "=r"(random)
+                        :
+                        :"%rax");
+        return random;
+}
+
+void set_fs_base(void)
+{
+        struct stack_canary *psc = &get_cpu_var(stk_canary);
+
+        psc->canary = get_random_value();
+        msr_write(MSR_IA32_FS_BASE, (uint64_t)psc);
+}
+#endif
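Note that set_fs_base() loses its static qualifier in the move, so init code outside security.c can seed the per-CPU canary. The rdrand loop retries until the carry flag reports valid entropy ("jnc 1b"), and the canary only protects anything if the compiler fetches its stack guard relative to %fs (GCC's -mstack-protector-guard=tls / -mstack-protector-guard-reg=fs / -mstack-protector-guard-offset family; whether ACRN's Makefile passes exactly these flags is an assumption, as the Makefile hunk is not shown here). A standalone C equivalent of the retry loop using the compiler intrinsic instead of inline asm:

#include <immintrin.h>   /* _rdrand64_step(); compile with -mrdrnd */
#include <stdint.h>

/* Equivalent of the inline-asm loop above: _rdrand64_step() returns 0
 * exactly when the hardware cleared CF (no entropy ready yet), which is
 * the case the "jnc 1b" branch retries on. */
static uint64_t get_random_value(void)
{
        unsigned long long random = 0ULL;

        while (_rdrand64_step(&random) == 0) {
                /* DRNG not ready; retry, mirroring "jnc 1b" */
        }
        return (uint64_t)random;
}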
hypervisor/arch/x86/trusty.c

@@ -6,6 +6,7 @@
 
 #include <hypervisor.h>
 #include <crypto_api.h>
+#include <security.h>
 
 #define ACRN_DBG_TRUSTY 6U
 
@@ -9,7 +9,7 @@
 #include <guest.h>
 #include <vcpu.h>
 #include <cpu.h>
-#include <cpu_caps.h>
+#include <security.h>
 #include <types.h>
 
 /* NOTE: