move security-related funcs into security.c

There are still some security-related functions in cpu_caps.c and
cpu.c; move them out into security.c.

Changes to be committed:
	modified:   Makefile
	modified:   arch/x86/cpu.c
	modified:   arch/x86/cpu_caps.c
	modified:   arch/x86/guest/vcpu.c
	new file:   arch/x86/security.c
	modified:   arch/x86/trusty.c
	modified:   arch/x86/vmx_asm.S
	modified:   include/arch/x86/cpu.h
	modified:   include/arch/x86/cpu_caps.h
	modified:   include/arch/x86/per_cpu.h
	new file:   include/arch/x86/security.h

Tracked-On: #1842
Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
Author: Jason Chen CJ
AuthorDate: 2018-12-18 09:28:27 +08:00
Committer: wenlingz
Parent: 0ad6da998c
Commit: 5968da4635
11 changed files with 150 additions and 113 deletions

Makefile

@@ -137,6 +137,7 @@ C_SRCS += arch/x86/ioapic.c
C_SRCS += arch/x86/lapic.c
C_SRCS += arch/x86/cpu.c
C_SRCS += arch/x86/cpu_caps.c
C_SRCS += arch/x86/security.c
C_SRCS += arch/x86/mmu.c
C_SRCS += arch/x86/e820.c
C_SRCS += arch/x86/pagetable.c

arch/x86/cpu.c

@@ -11,6 +11,7 @@
#include <trampoline.h>
#include <e820.h>
#include <cpu_caps.h>
#include <security.h>
struct per_cpu_region per_cpu_data[CONFIG_MAX_PCPU_NUM] __aligned(PAGE_SIZE);
uint16_t phys_cpu_num = 0U;
@@ -67,29 +68,6 @@ static void cpu_set_current_state(uint16_t pcpu_id, enum pcpu_boot_state state)
per_cpu(boot_state, pcpu_id) = state;
}
#ifdef STACK_PROTECTOR
static uint64_t get_random_value(void)
{
uint64_t random = 0UL;
asm volatile ("1: rdrand %%rax\n"
"jnc 1b\n"
"mov %%rax, %0\n"
: "=r"(random)
:
:"%rax");
return random;
}
static void set_fs_base(void)
{
struct stack_canary *psc = &get_cpu_var(stk_canary);
psc->canary = get_random_value();
msr_write(MSR_IA32_FS_BASE, (uint64_t)psc);
}
#endif
void init_cpu_pre(uint16_t pcpu_id)
{
if (pcpu_id == BOOT_CPU_ID) {

arch/x86/cpu_caps.c

@@ -36,10 +36,6 @@ struct cpu_capability {
struct cpuinfo_x86 boot_cpu_data;
static bool skip_l1dfl_vmentry;
static uint64_t x86_arch_capabilities;
int32_t ibrs_type;
bool cpu_has_cap(uint32_t bit)
{
uint32_t feat_idx = bit >> 5U;
@@ -252,30 +248,6 @@ void init_cpu_capabilities(void)
}
detect_cpu_cap();
/* For speculation defence.
 * The default way is to set IBRS at vmexit and then do IBPB at vcpu
 * context switch (ibrs_type == IBRS_RAW).
 * Now provide an optimized way (ibrs_type == IBRS_OPT) which sets
 * STIBP and does IBPB at vmexit, since having STIBP always set has
 * less impact than having IBRS always set. Also, since IBPB is
 * already done at vmexit, it is not necessary to do so at vcpu
 * context switch.
 */
ibrs_type = IBRS_NONE;
/* Currently for APL, if retpoline is enabled then IBRS should not
 * take effect.
 * TODO: add IA32_ARCH_CAPABILITIES[1] check; if this bit is set, IBRS
 * should be set all the time instead of relying on retpoline.
 */
#ifndef CONFIG_RETPOLINE
if (cpu_has_cap(X86_FEATURE_IBRS_IBPB)) {
ibrs_type = IBRS_RAW;
if (cpu_has_cap(X86_FEATURE_STIBP)) {
ibrs_type = IBRS_OPT;
}
}
#endif
}
static bool is_ept_supported(void)
@@ -334,43 +306,6 @@ void init_cpu_model_name(void)
boot_cpu_data.model_name[48] = '\0';
}
bool check_cpu_security_cap(void)
{
if (cpu_has_cap(X86_FEATURE_ARCH_CAP)) {
x86_arch_capabilities = msr_read(MSR_IA32_ARCH_CAPABILITIES);
skip_l1dfl_vmentry = ((x86_arch_capabilities
& IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) != 0UL);
} else {
return false;
}
if ((!cpu_has_cap(X86_FEATURE_L1D_FLUSH)) && (!skip_l1dfl_vmentry)) {
return false;
}
if (!cpu_has_cap(X86_FEATURE_IBRS_IBPB) &&
!cpu_has_cap(X86_FEATURE_STIBP)) {
return false;
}
return true;
}
void cpu_l1d_flush(void)
{
/*
 * 'skip_l1dfl_vmentry' will be true on platforms that
 * are not affected by L1TF.
 */
if (!skip_l1dfl_vmentry) {
if (cpu_has_cap(X86_FEATURE_L1D_FLUSH)) {
msr_write(MSR_IA32_FLUSH_CMD, IA32_L1D_FLUSH);
}
}
}
static inline bool is_vmx_disabled(void)
{
uint64_t msr_val;

arch/x86/guest/vcpu.c

@@ -7,6 +7,7 @@
#include <hypervisor.h>
#include <schedule.h>
#include <vm0_boot.h>
#include <security.h>
vm_sw_loader_t vm_sw_loader;

arch/x86/security.c

@@ -0,0 +1,108 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <types.h>
#include <msr.h>
#include <cpufeatures.h>
#include <spinlock.h>
#include <cpu.h>
#include <per_cpu.h>
#include <cpu_caps.h>
#include <security.h>
#include <logmsg.h>
static bool skip_l1dfl_vmentry;
static uint64_t x86_arch_capabilities;
int32_t ibrs_type;
static void detect_ibrs(void)
{
/* For speculation defence.
 * The default way is to set IBRS at vmexit and then do IBPB at vcpu
 * context switch (ibrs_type == IBRS_RAW).
 * Now provide an optimized way (ibrs_type == IBRS_OPT) which sets
 * STIBP and does IBPB at vmexit, since having STIBP always set has
 * less impact than having IBRS always set. Also, since IBPB is
 * already done at vmexit, it is not necessary to do so at vcpu
 * context switch.
 */
ibrs_type = IBRS_NONE;
/* Currently for APL, if retpoline is enabled then IBRS should not
 * take effect.
 * TODO: add IA32_ARCH_CAPABILITIES[1] check; if this bit is set, IBRS
 * should be set all the time instead of relying on retpoline.
 */
#ifndef CONFIG_RETPOLINE
if (cpu_has_cap(X86_FEATURE_IBRS_IBPB)) {
ibrs_type = IBRS_RAW;
if (cpu_has_cap(X86_FEATURE_STIBP)) {
ibrs_type = IBRS_OPT;
}
}
#endif
}
bool check_cpu_security_cap(void)
{
detect_ibrs();
if (cpu_has_cap(X86_FEATURE_ARCH_CAP)) {
x86_arch_capabilities = msr_read(MSR_IA32_ARCH_CAPABILITIES);
skip_l1dfl_vmentry = ((x86_arch_capabilities
& IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) != 0UL);
} else {
return false;
}
if ((!cpu_has_cap(X86_FEATURE_L1D_FLUSH)) && (!skip_l1dfl_vmentry)) {
return false;
}
if (!cpu_has_cap(X86_FEATURE_IBRS_IBPB) &&
!cpu_has_cap(X86_FEATURE_STIBP)) {
return false;
}
return true;
}
void cpu_l1d_flush(void)
{
/*
 * 'skip_l1dfl_vmentry' will be true on platforms that
 * are not affected by L1TF.
 */
if (!skip_l1dfl_vmentry) {
if (cpu_has_cap(X86_FEATURE_L1D_FLUSH)) {
msr_write(MSR_IA32_FLUSH_CMD, IA32_L1D_FLUSH);
}
}
}
#ifdef STACK_PROTECTOR
static uint64_t get_random_value(void)
{
uint64_t random = 0UL;
asm volatile ("1: rdrand %%rax\n"
"jnc 1b\n"
"mov %%rax, %0\n"
: "=r"(random)
:
:"%rax");
return random;
}
void set_fs_base(void)
{
struct stack_canary *psc = &get_cpu_var(stk_canary);
psc->canary = get_random_value();
msr_write(MSR_IA32_FS_BASE, (uint64_t)psc);
}
#endif
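
For context, a minimal sketch of how the interfaces collected here are
typically consumed on the guest-entry path. This is a hypothetical
caller, not part of this commit; the function name and the
MSR_IA32_SPEC_CTRL / SPEC_CTRL_IBRS constants are assumptions:

/* Hypothetical caller: apply mitigations just before resuming a guest. */
static void apply_guest_entry_mitigations(void)
{
	/* Flush the L1 data cache on L1TF-affected parts; cpu_l1d_flush()
	 * is a no-op when IA32_ARCH_CAPABILITIES reports the flush may be
	 * skipped at VM entry. */
	cpu_l1d_flush();

	/* ibrs_type was chosen by detect_ibrs(): IBRS_RAW sets IBRS at
	 * every vmexit, while IBRS_OPT keeps STIBP set instead. */
	if (ibrs_type == IBRS_RAW) {
		msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
	}
}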

arch/x86/trusty.c

@@ -6,6 +6,7 @@
#include <hypervisor.h>
#include <crypto_api.h>
#include <security.h>
#define ACRN_DBG_TRUSTY 6U

arch/x86/vmx_asm.S

@@ -9,7 +9,7 @@
#include <guest.h>
#include <vcpu.h>
#include <cpu.h>
#include <cpu_caps.h>
#include <security.h>
#include <types.h>
/* NOTE:

include/arch/x86/cpu.h

@@ -246,15 +246,6 @@ enum pcpu_boot_state {
PCPU_STATE_DEAD,
};
#ifdef STACK_PROTECTOR
struct stack_canary {
/* Gcc generates extra code, using [fs:40] to access canary */
uint8_t reserved[40];
uint64_t canary;
};
void __stack_chk_fail(void);
#endif
/* Function prototypes */
void cpu_do_idle(void);
void cpu_dead(void);

include/arch/x86/cpu_caps.h

@@ -7,19 +7,6 @@
#ifndef CPUINFO_H
#define CPUINFO_H
/* type of speculation control
 * 0 - no speculation control support
 * 1 - raw IBRS + IBPB support
 * 2 - with STIBP optimization support
 */
#define IBRS_NONE 0
#define IBRS_RAW 1
#define IBRS_OPT 2
#ifndef ASSEMBLER
extern int32_t ibrs_type;
struct cpu_state_info {
uint8_t px_cnt; /* count of all Px states */
const struct cpu_px_data *px_data;
@@ -72,6 +59,4 @@ bool check_cpu_security_cap(void);
void cpu_l1d_flush(void);
int detect_hardware_support(void);
#endif /* ASSEMBLER */
#endif /* CPUINFO_H */

include/arch/x86/per_cpu.h

@@ -18,6 +18,7 @@
#include <logmsg.h>
#include "arch/x86/guest/instr_emul.h"
#include <profiling.h>
#include <security.h>
struct per_cpu_region {
/* vmxon_region MUST be 4KB-aligned */

include/arch/x86/security.h

@@ -0,0 +1,36 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef SECURITY_H
#define SECURITY_H
/* type of speculation control
 * 0 - no speculation control support
 * 1 - raw IBRS + IBPB support
 * 2 - with STIBP optimization support
 */
#define IBRS_NONE 0
#define IBRS_RAW 1
#define IBRS_OPT 2
#ifndef ASSEMBLER
extern int32_t ibrs_type;
void cpu_l1d_flush(void);
bool check_cpu_security_cap(void);
#ifdef STACK_PROTECTOR
struct stack_canary {
/* Gcc generates extra code, using [fs:40] to access canary */
uint8_t reserved[40];
uint64_t canary;
};
void __stack_chk_fail(void);
void set_fs_base(void);
#endif
#endif /* ASSEMBLER */
#endif /* SECURITY_H */
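
The stack_canary layout above encodes a contract with the compiler:
with -fstack-protector, GCC on x86_64 reads the canary through %fs at
offset 40, so set_fs_base() must point IA32_FS_BASE at a structure
whose canary member sits exactly 40 bytes in. An illustrative
compile-time check of that assumption (a sketch, not part of this
commit; assumes offsetof via stddef.h is available):

#include <stddef.h>
#include <security.h>

#ifdef STACK_PROTECTOR
/* GCC-generated prologues and epilogues access the canary as [fs:40];
 * guarantee that struct stack_canary keeps that offset. */
_Static_assert(offsetof(struct stack_canary, canary) == 40U,
	       "canary must sit at fs:40 as generated code expects");
#endif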