KeyLocker is a new security feature available on new Intel CPUs that protects data-encryption keys for the Advanced Encryption Standard (AES) algorithm. These keys are more valuable than the data they guard: once stolen, a key can be used repeatedly, even on another system and even after the vulnerability that leaked it has been closed. Key Locker also introduces a CPU-internal wrapping key (IWKey), a key-encryption key used to wrap AES keys into handles. While the IWKey is inaccessible to software, randomizing its value at boot time helps make it unpredictable.

Key Locker usage:
- The new ENCODEKEY instruction takes an original key as input and returns a HANDLE encrypted by an internal wrapping key (IWKey, initialized by the LOADIWKEY instruction).
- Software can then delete the original key from memory.
- Early in boot, software is less likely to have a vulnerability that allows stealing the original key.
- Later encryption/decryption can use the HANDLE through the new AES Key Locker instructions.
- Notes:
  * Software can use the original key without knowing its value (it uses the HANDLE instead).
  * A HANDLE cannot be used on other systems or after a warm/cold reset.
  * The IWKey cannot be read from the CPU after it is loaded (this is the nature of the feature), and only one copy of the IWKey exists inside the CPU.

The virtualization implementation of Key Locker on ACRN is:
- Each vCPU has a 'struct iwkey' in struct acrn_vcpu_arch to store its IWKey.
- At initialization, every vCPU is created with a random IWKey.
- The hypervisor traps the execution of LOADIWKEY by a vCPU (via the 'LOADIWKEY exiting' VM-execution control) to capture and save the IWKey when the guest sets a new one. Randomization of LOADIWKEY is not supported (CPUID is emulated to disable it), as the hypervisor cannot capture and save a randomly generated IWKey. From the Key Locker spec:
  "Note that a VMM may wish to enumerate no support for HW random IWKeys to the guest (i.e. enumerate CPUID.19H:ECX[1] as 0) as such IWKeys cannot be easily context switched. A guest ENCODEKEY will return the type of IWKey used (IWKey.KeySource) and thus will notice if a VMM virtualized a HW random IWKey with a SW specified IWKey."
- In context_switch_in() of each vCPU, the hypervisor loads that vCPU's IWKey into the pCPU with the LOADIWKEY instruction.
- It is assumed that the ACRN hypervisor itself never uses the Key Locker feature.

This patch implements the vCPU's IWKey management; the next patch implements the host-context IWKey save/restore logic.

Tracked-On: #5695
Signed-off-by: Shuo A Liu <shuo.a.liu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
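As a rough illustration of the scheme above, a minimal sketch follows. Only 'struct iwkey' and the context-switch hook are named in this description; the field layout, the handle_loadiwkey_vmexit()/restore_vcpu_iwkey() names and the loadiwkey_insn() wrapper are illustrative assumptions (using ACRN's uint64_t from <types.h>), not the patch itself:

/* Minimal sketch, not the actual patch: the field layout and helper names
 * below are assumptions for illustration.
 */
struct iwkey {
	/* An IWKey consists of a 256-bit encryption key and a 128-bit
	 * integrity key, supplied to LOADIWKEY via XMM0-XMM2. */
	uint64_t encryption_key[4];
	uint64_t integrity_key[2];
};

/* Assumed wrapper: move the key into XMM0-XMM2 and execute LOADIWKEY with
 * a software-specified KeySource (no hardware randomization, since a
 * random IWKey cannot be captured and context switched by the VMM). */
extern void loadiwkey_insn(const struct iwkey *key);

/* On a 'LOADIWKEY exiting' VM exit: capture the guest's new IWKey into
 * its per-vCPU slot, then load it into the pCPU. */
void handle_loadiwkey_vmexit(struct iwkey *vcpu_iwkey, const struct iwkey *guest_key)
{
	*vcpu_iwkey = *guest_key;
	loadiwkey_insn(vcpu_iwkey);
}

/* Called from context_switch_in(): the CPU holds only one IWKey, so the
 * incoming vCPU's key must be reloaded before its guest runs again. */
void restore_vcpu_iwkey(const struct iwkey *vcpu_iwkey)
{
	loadiwkey_insn(vcpu_iwkey);
}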
/*
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <types.h>
#include <msr.h>
#include <cpufeatures.h>
#include <cpu.h>
#include <per_cpu.h>
#include <cpu_caps.h>
#include <security.h>
#include <logmsg.h>

static bool skip_l1dfl_vmentry;
static bool cpu_md_clear;
static int32_t ibrs_type;

static void detect_ibrs(void)
{
	/* For speculation defence.
	 * The default way is to set IBRS at vmexit and then do IBPB at vcpu
	 * context switch (ibrs_type == IBRS_RAW).
	 * Now provide an optimized way (ibrs_type == IBRS_OPT) which sets
	 * STIBP and does IBPB at vmexit, since having STIBP always set has
	 * less impact than having IBRS always set. Also, since IBPB is
	 * already done at vmexit, it is not necessary to do so at vcpu
	 * context switch.
	 */
	ibrs_type = IBRS_NONE;

	/* Currently for APL, if retpoline is enabled, then IBRS should not
	 * take effect.
	 * TODO: add IA32_ARCH_CAPABILITIES[1] check; if this bit is set, IBRS
	 * should be set all the time instead of relying on retpoline.
	 */
#ifndef CONFIG_RETPOLINE
	if (pcpu_has_cap(X86_FEATURE_IBRS_IBPB)) {
		ibrs_type = IBRS_RAW;
		if (pcpu_has_cap(X86_FEATURE_STIBP)) {
			ibrs_type = IBRS_OPT;
		}
	}
#endif
}

int32_t get_ibrs_type(void)
{
	return ibrs_type;
}

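/*
 * Detect IBRS support and check that the mitigation capabilities this
 * hypervisor relies on (L1D flush for L1TF, SSBD for SSB, IBRS/IBPB or
 * STIBP, and MD_CLEAR for MDS) are available on the physical CPU.
 */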
bool check_cpu_security_cap(void)
{
	bool ret = true;
	bool mds_no = false;
	bool ssb_no = false;
	uint64_t x86_arch_capabilities;

	detect_ibrs();

	if (pcpu_has_cap(X86_FEATURE_ARCH_CAP)) {
		x86_arch_capabilities = msr_read(MSR_IA32_ARCH_CAPABILITIES);
		skip_l1dfl_vmentry = ((x86_arch_capabilities
			& IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) != 0UL);

		mds_no = ((x86_arch_capabilities & IA32_ARCH_CAP_MDS_NO) != 0UL);

		/* SSB_NO: processor is not susceptible to Speculative Store Bypass (SSB) */
		ssb_no = ((x86_arch_capabilities & IA32_ARCH_CAP_SSB_NO) != 0UL);
	}

	if ((!pcpu_has_cap(X86_FEATURE_L1D_FLUSH)) && (!skip_l1dfl_vmentry)) {
		/* Processor is affected by the L1TF CPU vulnerability,
		 * but has no L1D_FLUSH command support.
		 */
		ret = false;
	}

	if ((!pcpu_has_cap(X86_FEATURE_SSBD)) && (!ssb_no)) {
		/* Processor is susceptible to Speculative Store Bypass (SSB),
		 * but has no support for Speculative Store Bypass Disable (SSBD).
		 */
		ret = false;
	}

	if ((!pcpu_has_cap(X86_FEATURE_IBRS_IBPB)) && (!pcpu_has_cap(X86_FEATURE_STIBP))) {
		ret = false;
	}

	if (!mds_no) { /* Processor is affected by the MDS vulnerability. */
		if (pcpu_has_cap(X86_FEATURE_MDS_CLEAR)) {
			cpu_md_clear = true;
#ifdef CONFIG_L1D_FLUSH_VMENTRY_ENABLED
			if (!skip_l1dfl_vmentry) {
				/* The L1D cache flush also overwrites CPU internal buffers,
				 * so an additional MDS buffer clear operation is not required.
				 */
				cpu_md_clear = false;
			}
#endif
		} else {
			/* Processor is affected by MDS but no mitigation software
			 * interface is enumerated; the CPU microcode needs to be updated.
			 */
			ret = false;
		}
	}

	return ret;
}

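/*
 * Flush the L1 data cache via the IA32_FLUSH_CMD MSR before VM entry, as the
 * L1TF mitigation, on platforms that need it.
 */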
void cpu_l1d_flush(void)
{
	/*
	 * 'skip_l1dfl_vmentry' will be true on a platform that
	 * is not affected by L1TF.
	 */
	if (!skip_l1dfl_vmentry) {
		if (pcpu_has_cap(X86_FEATURE_L1D_FLUSH)) {
			msr_write(MSR_IA32_FLUSH_CMD, IA32_L1D_FLUSH);
		}
	}
}

/*
 * The VERW instruction (with the microcode update) will overwrite
 * CPU internal buffers.
 */
static inline void verw_buffer_overwriting(void)
{
	uint16_t ds = HOST_GDT_RING0_DATA_SEL;

	asm volatile ("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/*
 * On processors that enumerate MD_CLEAR: CPUID.(EAX=7H,ECX=0):EDX[MD_CLEAR=10],
 * the VERW instruction or the L1D_FLUSH command should be used to cause the
 * processor to overwrite buffer values that are affected by MDS
 * (Microarchitectural Data Sampling) vulnerabilities.
 *
 * The VERW instruction and the L1D_FLUSH command overwrite these buffer values:
 * - Store buffer value for the current logical processor on processors affected
 *   by MSBDS (Microarchitectural Store Buffer Data Sampling).
 * - Fill buffer for all logical processors on the physical core for processors
 *   affected by MFBDS (Microarchitectural Fill Buffer Data Sampling).
 * - Load port for all logical processors on the physical core for processors
 *   affected by MLPDS (Microarchitectural Load Port Data Sampling).
 *
 * If the processor is affected by the L1TF vulnerability and the mitigation is
 * enabled, L1D_FLUSH will overwrite internal buffers on processors affected by
 * MDS, and no additional buffer overwriting is required before VM entry. For
 * other cases, the VERW instruction is used to overwrite buffer values for
 * processors affected by MDS.
 */
void cpu_internal_buffers_clear(void)
{
	if (cpu_md_clear) {
		verw_buffer_overwriting();
	}
}

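/* Retry RDRAND until it succeeds (CF set) and return the 64-bit random value. */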
uint64_t get_random_value(void)
{
	uint64_t random;

	asm volatile ("1: rdrand %%rax\n"
		      "jnc 1b\n"
		      "mov %%rax, %0\n"
		      : "=r"(random)
		      :
		      : "%rax");
	return random;
}

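/*
 * Initialize the per-CPU stack canary with a random value and point
 * IA32_FS_BASE at it so that stack-protector checks can locate it.
 */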
#ifdef STACK_PROTECTOR
void set_fs_base(void)
{
	struct stack_canary *psc = &get_cpu_var(stk_canary);

	psc->canary = get_random_value();
	msr_write(MSR_IA32_FS_BASE, (uint64_t)psc);
}
#endif

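/*
 * Decide whether EPT must restrict executable guest pages to 4KB mappings to
 * mitigate the "Machine Check Error on Page Size Change" erratum. Atom-based
 * models and processors enumerating IA32_ARCH_CAPABILITIES[IF_PSCHANGE_MC_NO]
 * are not affected.
 */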
#ifdef CONFIG_MCE_ON_PSC_WORKAROUND_DISABLED
bool is_ept_force_4k_ipage(void)
{
	return false;
}
#else
bool is_ept_force_4k_ipage(void)
{
	bool force_4k_ipage = true;
	const struct cpuinfo_x86 *info = get_pcpu_info();
	uint64_t x86_arch_capabilities;

	if (info->displayfamily == 0x6U) {
		switch (info->displaymodel) {
		case 0x26U:
		case 0x27U:
		case 0x35U:
		case 0x36U:
		case 0x37U:
		case 0x86U:
		case 0x1CU:
		case 0x4AU:
		case 0x4CU:
		case 0x4DU:
		case 0x5AU:
		case 0x5CU:
		case 0x5DU:
		case 0x5FU:
		case 0x6EU:
		case 0x7AU:
			/* Atom processors are not affected by the issue
			 * "Machine Check Error on Page Size Change".
			 */
			force_4k_ipage = false;
			break;
		default:
			force_4k_ipage = true;
			break;
		}
	}

	if (pcpu_has_cap(X86_FEATURE_ARCH_CAP)) {
		x86_arch_capabilities = msr_read(MSR_IA32_ARCH_CAPABILITIES);
		if ((x86_arch_capabilities & IA32_ARCH_CAP_IF_PSCHANGE_MC_NO) != 0UL) {
			force_4k_ipage = false;
		}
	}

	return force_4k_ipage;
}
#endif