hv: keylocker: Support Key Locker feature for guest VM

Key Locker is a new security feature available on newer Intel CPUs that
protects data-encryption keys for the Advanced Encryption Standard (AES)
algorithm. These keys are often more valuable than the data they guard:
once stolen, a key can be used repeatedly, even on another system and
even after the vulnerability has been closed.

Key Locker also introduces a CPU-internal wrapping key (IWKey), a key-
encryption key used to wrap AES keys into handles. The IWKey itself is
inaccessible to software, and randomizing its value at boot time keeps
it unpredictable.

Key Locker usage (a usage sketch in C follows this list):
 - The new "ENCODEKEY" instructions take an original key as input and
   return a HANDLE encrypted by the internal wrapping key (IWKey,
   initialized by the "LOADIWKEY" instruction)
 - Software can then delete the original key from memory
 - Early in boot, software is less likely to contain a vulnerability
   that would allow the original key to be stolen
 - Later encryption/decryption can use the HANDLE through the new AES
   Key Locker instructions
 - Note:
      * Software can use the original key without knowing its value
        (it uses the HANDLE)
      * A HANDLE cannot be used on other systems or after a warm/cold
        reset
      * The IWKey cannot be read back from the CPU after it is loaded
        (this is the nature of the feature), and only one copy of the
        IWKey exists inside the CPU.
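
For illustration, a minimal user-space sketch of that flow using the
Key Locker compiler intrinsics from <immintrin.h> (GCC/Clang built with
-mkl). This is not part of the patch: the function and buffer names are
hypothetical, and it assumes a Key Locker capable CPU with CR4.KL
enabled by the OS:

    #include <immintrin.h>
    #include <stdint.h>
    #include <string.h>

    int encrypt_with_handle(uint8_t out[16], const uint8_t in[16],
                            uint8_t aes_key[16])
    {
            uint8_t handle[48];    /* ENCODEKEY128 emits a 384-bit handle */
            __m128i key, idata, odata;

            /* Wrap the raw AES key into a handle; uses the loaded IWKey */
            key = _mm_loadu_si128((const __m128i *)aes_key);
            (void)_mm_encodekey128_u32(0U, key, handle);

            /* The raw key may now be erased from memory
             * (real code needs a non-optimizable wipe) */
            memset(aes_key, 0, 16);

            /* Encrypt one block via the handle; returns nonzero (ZF=1)
             * if the handle is not authentic, e.g. it was created on
             * another system or before the last reset */
            idata = _mm_loadu_si128((const __m128i *)in);
            if (_mm_aesenc128kl_u8(&odata, idata, handle) != 0U) {
                    return -1;
            }
            _mm_storeu_si128((__m128i *)out, odata);
            return 0;
    }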

The Key Locker virtualization in ACRN works as follows:
 - Each vCPU has a 'struct iwkey' in struct acrn_vcpu_arch to store
   its IWKey.
 - At initialization, every vCPU is created with a random IWKey.
 - The hypervisor traps a vCPU's execution of LOADIWKEY (via the
   'LOADIWKEY exiting' VM-execution control) to capture and save the
   IWKey whenever the guest sets a new one. IWKey randomization is not
   supported (CPUID is emulated to disable it), as the hypervisor
   cannot capture and save a hardware-random IWKey. From the Key
   Locker spec:
   "Note that a VMM may wish to enumerate no support for HW random IWKeys
   to the guest (i.e. enumerate CPUID.19H:ECX[1] as 0) as such IWKeys
   cannot be easily context switched. A guest ENCODEKEY will return the
   type of IWKey used (IWKey.KeySource) and thus will notice if a VMM
   virtualized a HW random IWKey with a SW specified IWKey."
 - In context_switch_in() of each vCPU, the hypervisor loads that
   vCPU's IWKey into the pCPU with the LOADIWKEY instruction (a sketch
   of this load follows the list).
 - It is assumed that the ACRN hypervisor itself never uses the Key
   Locker feature.
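
For illustration, a minimal sketch of that context-switch-in load in
the style of this patch ('struct iwkey', loadiwkey() and
X86_FEATURE_KEYLOCKER come from this series; load_vcpu_iwkey() and the
helper write_xmm_0_2() are hypothetical, and the real save/restore
logic lands in the next patch):

    /* Restore this vCPU's IWKey into the current pCPU. LOADIWKEY
     * consumes xmm0 (128-bit integrity key) and xmm1/xmm2 (256-bit
     * encryption key), which write_xmm_0_2() is assumed to fill. */
    static void load_vcpu_iwkey(struct acrn_vcpu *vcpu)
    {
            if (pcpu_has_cap(X86_FEATURE_KEYLOCKER)) {
                    write_xmm_0_2(vcpu->arch.IWKey.integrity_key,
                                  vcpu->arch.IWKey.encryption_key);
                    loadiwkey(0U); /* eax = 0: no backup, no randomization */
            }
    }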

This patch implements the vCPU IWKey management; the next patch adds
the host-context IWKey save/restore logic.

Tracked-On: #5695
Signed-off-by: Shuo A Liu <shuo.a.liu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>

@@ -491,6 +491,9 @@ int32_t detect_hardware_support(void)
 	} else if (!pcpu_has_cap(X86_FEATURE_COMPACTION_EXT)) {
 		printf("%s, Compaction extensions in XSAVE is not supported\n", __func__);
 		ret = -ENODEV;
+	} else if (!pcpu_has_cap(X86_FEATURE_RDRAND)) {
+		printf("%s, RDRAND is not supported\n", __func__);
+		ret = -ENODEV;
 	} else {
 		ret = check_vmx_mmu_cap();
 	}


@@ -10,6 +10,7 @@
 #include <bits.h>
 #include <vmx.h>
 #include <logmsg.h>
+#include <cpufeatures.h>
 #include <cpu_caps.h>
 #include <per_cpu.h>
 #include <init.h>
@@ -194,6 +195,19 @@ void vcpu_reset_eoi_exit_bitmaps(struct acrn_vcpu *vcpu)
 	vcpu_make_request(vcpu, ACRN_REQUEST_EOI_EXIT_BITMAP_UPDATE);
 }
 
+static void init_iwkey(struct acrn_vcpu *vcpu)
+{
+	/* Initialize a random IWKey */
+	if (pcpu_has_cap(X86_FEATURE_KEYLOCKER)) {
+		vcpu->arch.IWKey.integrity_key[0] = get_random_value();
+		vcpu->arch.IWKey.integrity_key[1] = get_random_value();
+		vcpu->arch.IWKey.encryption_key[0] = get_random_value();
+		vcpu->arch.IWKey.encryption_key[1] = get_random_value();
+		vcpu->arch.IWKey.encryption_key[2] = get_random_value();
+		vcpu->arch.IWKey.encryption_key[3] = get_random_value();
+	}
+}
+
 /* As a vcpu reset internal API, DO NOT touch any vcpu state transition in this function. */
 static void vcpu_reset_internal(struct acrn_vcpu *vcpu, enum reset_mode mode)
 {
@@ -225,6 +239,8 @@ static void vcpu_reset_internal(struct acrn_vcpu *vcpu, enum reset_mode mode)
 	for (i = 0; i < VCPU_EVENT_NUM; i++) {
 		reset_event(&vcpu->events[i]);
 	}
+
+	init_iwkey(vcpu);
 }
 
 struct acrn_vcpu *get_running_vcpu(uint16_t pcpu_id)


@@ -237,6 +237,25 @@ static uint32_t check_vmx_ctrl(uint32_t msr, uint32_t ctrl_req)
 }
 
+static uint64_t check_vmx_ctrl_64(uint32_t msr, uint64_t ctrl_req)
+{
+	uint64_t vmx_msr;
+	uint64_t ctrl = ctrl_req;
+
+	vmx_msr = msr_read(msr);
+
+	/* For 64-bit controls, all 64 bits are allowed 1-settings */
+	ctrl &= vmx_msr;
+
+	if ((ctrl_req & ~ctrl) != 0UL) {
+		pr_err("VMX ctrl 0x%x not fully enabled: "
+			"request 0x%llx but get 0x%llx\n",
+			msr, ctrl_req, ctrl);
+	}
+
+	return ctrl;
+}
+
 static void init_exec_ctrl(struct acrn_vcpu *vcpu)
 {
 	uint32_t value32;
@@ -336,6 +355,15 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
 	exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS2, value32);
 	pr_dbg("VMX_PROC_VM_EXEC_CONTROLS2: 0x%x ", value32);
 
+	/* Set up tertiary processor based VM execution controls */
+	if ((exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS) & VMX_PROCBASED_CTLS_TERTIARY) != 0U) {
+		/* Enable KeyLocker if supported */
+		value64 = check_vmx_ctrl_64(MSR_IA32_VMX_PROCBASED_CTLS3, VMX_PROCBASED_CTLS3_LOADIWKEY);
+		exec_vmwrite64(VMX_PROC_VM_EXEC_CONTROLS3_FULL, value64);
+		pr_dbg("VMX_PROC_VM_EXEC_CONTROLS3: 0x%llx ", value64);
+	}
+
 	/* APIC-v, config APIC-access address */
 	value64 = vlapic_apicv_get_apic_access_addr();
 	exec_vmwrite64(VMX_APIC_ACCESS_ADDR_FULL, value64);


@@ -17,6 +17,7 @@
 #include <splitlock.h>
 #include <ept.h>
 #include <vtd.h>
+#include <cpuid.h>
 #include <vcpuid.h>
 #include <trace.h>
 #include <rtcm.h>
@@ -25,7 +26,7 @@
  * According to "SDM APPENDIX C VMX BASIC EXIT REASONS",
  * there are 65 Basic Exit Reasons.
  */
-#define NR_VMX_EXIT_REASONS	65U
+#define NR_VMX_EXIT_REASONS	70U
 
 static int32_t triple_fault_vmexit_handler(struct acrn_vcpu *vcpu);
 static int32_t unhandled_vmexit_handler(struct acrn_vcpu *vcpu);
@@ -35,6 +36,7 @@ static int32_t undefined_vmexit_handler(struct acrn_vcpu *vcpu);
 static int32_t pause_vmexit_handler(__unused struct acrn_vcpu *vcpu);
 static int32_t hlt_vmexit_handler(struct acrn_vcpu *vcpu);
 static int32_t mtf_vmexit_handler(struct acrn_vcpu *vcpu);
+static int32_t loadiwkey_vmexit_handler(struct acrn_vcpu *vcpu);
 
 /* VM Dispatch table for Exit condition handling */
 static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
@@ -169,7 +171,9 @@ static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
 	[VMX_EXIT_REASON_XSAVES] = {
 		.handler = unhandled_vmexit_handler},
 	[VMX_EXIT_REASON_XRSTORS] = {
-		.handler = unhandled_vmexit_handler}
+		.handler = unhandled_vmexit_handler},
+	[VMX_EXIT_REASON_LOADIWKEY] = {
+		.handler = loadiwkey_vmexit_handler}
 };
 
 int32_t vmexit_handler(struct acrn_vcpu *vcpu)
@@ -430,6 +434,31 @@ static int32_t wbinvd_vmexit_handler(struct acrn_vcpu *vcpu)
 	return 0;
 }
 
+static int32_t loadiwkey_vmexit_handler(struct acrn_vcpu *vcpu)
+{
+	uint64_t xmm[6] = {0UL};
+
+	/* Wrapping key NoBackup and randomization are not supported */
+	if (vcpu_get_gpreg(vcpu, CPU_REG_RAX) != 0UL) {
+		vcpu_inject_gp(vcpu, 0U);
+	} else {
+		/* Capture the guest's IWKey from xmm0-xmm2 */
+		asm volatile ("movdqu %%xmm0, %0\n"
+			      "movdqu %%xmm1, %1\n"
+			      "movdqu %%xmm2, %2\n"
+			      : "=m"(xmm[0]), "=m"(xmm[2]), "=m"(xmm[4]));
+		vcpu->arch.IWKey.encryption_key[0] = xmm[2];
+		vcpu->arch.IWKey.encryption_key[1] = xmm[3];
+		vcpu->arch.IWKey.encryption_key[2] = xmm[4];
+		vcpu->arch.IWKey.encryption_key[3] = xmm[5];
+		vcpu->arch.IWKey.integrity_key[0] = xmm[0];
+		vcpu->arch.IWKey.integrity_key[1] = xmm[1];
+		loadiwkey(0U);
+	}
+
+	return 0;
+}
+
 /* vmexit handler for just injecting a #UD exception
  *
  * ACRN doesn't support nested virtualization, the following VMExit will inject #UD


@@ -162,10 +162,9 @@ void cpu_internal_buffers_clear(void)
 	}
 }
 
-#ifdef STACK_PROTECTOR
-static uint64_t get_random_value(void)
+uint64_t get_random_value(void)
 {
-	uint64_t random = 0UL;
+	uint64_t random;
 
 	asm volatile ("1: rdrand %%rax\n"
 			"jnc 1b\n"
@@ -176,6 +175,7 @@ static uint64_t get_random_value(void)
 	return random;
 }
 
+#ifdef STACK_PROTECTOR
 void set_fs_base(void)
 {
 	struct stack_canary *psc = &get_cpu_var(stk_canary);


@@ -659,6 +659,11 @@ static inline void xrstors(const struct xsave_area *region_addr, uint64_t mask)
 			"memory");
 }
 
+static inline void loadiwkey(uint32_t eax)
+{
+	asm volatile (".byte 0xf3, 0x0f, 0x38, 0xdc, 0xd1;" : : "a" (eax));
+}
+
 /*
  * stac/clac pair is used to access guest's memory protected by SMAP,
  * following below flow:


@@ -35,6 +35,7 @@
 #define X86_FEATURE_XSAVE	((FEAT_1_ECX << 5U) + 26U)
 #define X86_FEATURE_OSXSAVE	((FEAT_1_ECX << 5U) + 27U)
 #define X86_FEATURE_AVX		((FEAT_1_ECX << 5U) + 28U)
+#define X86_FEATURE_RDRAND	((FEAT_1_ECX << 5U) + 30U)
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (EDX)*/
 #define X86_FEATURE_FPU		((FEAT_1_EDX << 5U) + 0U)


@@ -203,6 +203,13 @@ struct msr_store_area {
 	uint32_t count;	/* actual count of entries to be loaded/restored during VMEntry/VMExit */
 };
 
+struct iwkey {
+	/* 256-bit encryption key */
+	uint64_t encryption_key[4];
+	/* 128-bit integrity key */
+	uint64_t integrity_key[2];
+};
+
 struct acrn_vcpu_arch {
 	/* vmcs region for this vcpu, MUST be 4KB-aligned */
 	uint8_t vmcs[PAGE_SIZE];
@@ -261,6 +268,7 @@ struct acrn_vcpu_arch {
 	uint64_t eoi_exit_bitmap[EOI_EXIT_BITMAP_SIZE >> 6U];
 
 	/* Keylocker */
+	struct iwkey IWKey;
 	bool cr4_kl_enabled;
 } __aligned(PAGE_SIZE);


@@ -22,6 +22,7 @@ void cpu_l1d_flush(void);
 bool check_cpu_security_cap(void);
 void cpu_internal_buffers_clear(void);
 bool is_ept_force_4k_ipage(void);
+uint64_t get_random_value(void);
 
 #ifdef STACK_PROTECTOR
 struct stack_canary {


@@ -64,6 +64,10 @@
 #define VMX_XSS_EXITING_BITMAP_FULL		0x0000202CU
 #define VMX_XSS_EXITING_BITMAP_HIGH		0x0000202DU
+
+#define VMX_PROC_VM_EXEC_CONTROLS3_FULL		0x00002034U
+#define VMX_PROC_VM_EXEC_CONTROLS3_HIGH		0x00002035U
 
 /* 64-bit read-only data fields */
 #define VMX_GUEST_PHYSICAL_ADDR_FULL		0x00002400U
 #define VMX_GUEST_PHYSICAL_ADDR_HIGH		0x00002401U
@@ -263,6 +267,7 @@
 #define VMX_EXIT_REASON_PAGE_MODIFICATION_LOG_FULL	0x0000003EU
 #define VMX_EXIT_REASON_XSAVES				0x0000003FU
 #define VMX_EXIT_REASON_XRSTORS				0x00000040U
+#define VMX_EXIT_REASON_LOADIWKEY			0x00000045U
 
 /* VMX execution control bits (pin based) */
 #define VMX_PINBASED_CTLS_IRQ_EXIT			(1U<<0U)
@@ -312,6 +317,7 @@
 #define VMX_PROCBASED_CTLS2_RDSEED		(1U<<16U)
 #define VMX_PROCBASED_CTLS2_EPT_VE		(1U<<18U)
 #define VMX_PROCBASED_CTLS2_XSVE_XRSTR		(1U<<20U)
+#define VMX_PROCBASED_CTLS3_LOADIWKEY		(1U<<0U)
 
 /* MSR_IA32_VMX_EPT_VPID_CAP: EPT and VPID capability bits */
 #define VMX_EPT_EXECUTE_ONLY			(1U << 0U)