diff --git a/hypervisor/arch/x86/cpu_caps.c b/hypervisor/arch/x86/cpu_caps.c
index 238955ece..8853150dd 100644
--- a/hypervisor/arch/x86/cpu_caps.c
+++ b/hypervisor/arch/x86/cpu_caps.c
@@ -491,6 +491,9 @@ int32_t detect_hardware_support(void)
 	} else if (!pcpu_has_cap(X86_FEATURE_COMPACTION_EXT)) {
 		printf("%s, Compaction extensions in XSAVE is not supported\n", __func__);
 		ret = -ENODEV;
+	} else if (!pcpu_has_cap(X86_FEATURE_RDRAND)) {
+		printf("%s, RDRAND is not supported\n", __func__);
+		ret = -ENODEV;
 	} else {
 		ret = check_vmx_mmu_cap();
 	}
diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c
index bf6a049fe..8f75eda80 100644
--- a/hypervisor/arch/x86/guest/vcpu.c
+++ b/hypervisor/arch/x86/guest/vcpu.c
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -194,6 +195,19 @@ void vcpu_reset_eoi_exit_bitmaps(struct acrn_vcpu *vcpu)
 	vcpu_make_request(vcpu, ACRN_REQUEST_EOI_EXIT_BITMAP_UPDATE);
 }
 
+static void init_iwkey(struct acrn_vcpu *vcpu)
+{
+	/* Initialize a random IWKey */
+	if (pcpu_has_cap(X86_FEATURE_KEYLOCKER)) {
+		vcpu->arch.IWKey.integrity_key[0] = get_random_value();
+		vcpu->arch.IWKey.integrity_key[1] = get_random_value();
+		vcpu->arch.IWKey.encryption_key[0] = get_random_value();
+		vcpu->arch.IWKey.encryption_key[1] = get_random_value();
+		vcpu->arch.IWKey.encryption_key[2] = get_random_value();
+		vcpu->arch.IWKey.encryption_key[3] = get_random_value();
+	}
+}
+
 /* As a vcpu reset internal API, DO NOT touch any vcpu state transition in this function. */
 static void vcpu_reset_internal(struct acrn_vcpu *vcpu, enum reset_mode mode)
 {
@@ -225,6 +239,8 @@ static void vcpu_reset_internal(struct acrn_vcpu *vcpu, enum reset_mode mode)
 	for (i = 0; i < VCPU_EVENT_NUM; i++) {
 		reset_event(&vcpu->events[i]);
 	}
+
+	init_iwkey(vcpu);
 }
 
 struct acrn_vcpu *get_running_vcpu(uint16_t pcpu_id)
diff --git a/hypervisor/arch/x86/guest/vmcs.c b/hypervisor/arch/x86/guest/vmcs.c
index 04b0abd25..cfa982417 100644
--- a/hypervisor/arch/x86/guest/vmcs.c
+++ b/hypervisor/arch/x86/guest/vmcs.c
@@ -237,6 +237,25 @@ static uint32_t check_vmx_ctrl(uint32_t msr, uint32_t ctrl_req)
 
 }
 
+static uint64_t check_vmx_ctrl_64(uint32_t msr, uint64_t ctrl_req)
+{
+	uint64_t vmx_msr;
+	uint64_t ctrl = ctrl_req;
+
+	vmx_msr = msr_read(msr);
+
+	/* all 64 bits of the MSR report the allowed 1-settings */
+	ctrl &= vmx_msr;
+
+	if ((ctrl_req & ~ctrl) != 0UL) {
+		pr_err("VMX ctrl 0x%x not fully enabled: "
+			"request 0x%llx but get 0x%llx\n",
+			msr, ctrl_req, ctrl);
+	}
+
+	return ctrl;
+}
+
 static void init_exec_ctrl(struct acrn_vcpu *vcpu)
 {
 	uint32_t value32;
@@ -336,6 +355,15 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
 	exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS2, value32);
 	pr_dbg("VMX_PROC_VM_EXEC_CONTROLS2: 0x%x ", value32);
 
+	/* Set up tertiary processor-based VM execution controls */
+	if ((exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS) & VMX_PROCBASED_CTLS_TERTIARY) != 0U) {
+		/* Enable KeyLocker if supported */
+		value64 = check_vmx_ctrl_64(MSR_IA32_VMX_PROCBASED_CTLS3, VMX_PROCBASED_CTLS3_LOADIWKEY);
+
+		exec_vmwrite64(VMX_PROC_VM_EXEC_CONTROLS3_FULL, value64);
+		pr_dbg("VMX_PROC_VM_EXEC_CONTROLS3: 0x%llx ", value64);
+	}
+
 	/*APIC-v, config APIC-access address*/
 	value64 = vlapic_apicv_get_apic_access_addr();
 	exec_vmwrite64(VMX_APIC_ACCESS_ADDR_FULL, value64);
diff --git a/hypervisor/arch/x86/guest/vmexit.c b/hypervisor/arch/x86/guest/vmexit.c
index 9b2c1b2b9..0344b950f 100644
--- a/hypervisor/arch/x86/guest/vmexit.c
+++ b/hypervisor/arch/x86/guest/vmexit.c
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -25,7 +26,7 @@
  * According to "SDM APPENDIX C VMX BASIC EXIT REASONS",
- * there are 65 Basic Exit Reasons.
+ * there are 70 Basic Exit Reasons.
  */
-#define NR_VMX_EXIT_REASONS	65U
+#define NR_VMX_EXIT_REASONS	70U
 
 static int32_t triple_fault_vmexit_handler(struct acrn_vcpu *vcpu);
 static int32_t unhandled_vmexit_handler(struct acrn_vcpu *vcpu);
@@ -35,6 +36,7 @@ static int32_t undefined_vmexit_handler(struct acrn_vcpu *vcpu);
 static int32_t pause_vmexit_handler(__unused struct acrn_vcpu *vcpu);
 static int32_t hlt_vmexit_handler(struct acrn_vcpu *vcpu);
 static int32_t mtf_vmexit_handler(struct acrn_vcpu *vcpu);
+static int32_t loadiwkey_vmexit_handler(struct acrn_vcpu *vcpu);
 
 /* VM Dispatch table for Exit condition handling */
 static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
@@ -169,7 +171,9 @@ static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
 	[VMX_EXIT_REASON_XSAVES] = {
 		.handler = unhandled_vmexit_handler},
 	[VMX_EXIT_REASON_XRSTORS] = {
-		.handler = unhandled_vmexit_handler}
+		.handler = unhandled_vmexit_handler},
+	[VMX_EXIT_REASON_LOADIWKEY] = {
+		.handler = loadiwkey_vmexit_handler}
 };
 
 int32_t vmexit_handler(struct acrn_vcpu *vcpu)
@@ -430,6 +434,32 @@ static int32_t wbinvd_vmexit_handler(struct acrn_vcpu *vcpu)
 	return 0;
 }
 
+static int32_t loadiwkey_vmexit_handler(struct acrn_vcpu *vcpu)
+{
+	uint64_t xmm[6] = {0};
+
+	/* Wrapping key no-backup and randomization are not supported */
+	if ((vcpu_get_gpreg(vcpu, CPU_REG_RAX) != 0UL)) {
+		vcpu_inject_gp(vcpu, 0);
+	} else {
+		/* Per LOADIWKEY: xmm0 holds the integrity key, xmm1/xmm2 the encryption key */
+		asm volatile ("movdqu %%xmm0, %0\n"
+			      "movdqu %%xmm1, %1\n"
+			      "movdqu %%xmm2, %2\n"
+			      : "=m"(xmm[0]), "=m"(xmm[2]), "=m"(xmm[4]) : : "memory");
+		vcpu->arch.IWKey.encryption_key[0] = xmm[2];
+		vcpu->arch.IWKey.encryption_key[1] = xmm[3];
+		vcpu->arch.IWKey.encryption_key[2] = xmm[4];
+		vcpu->arch.IWKey.encryption_key[3] = xmm[5];
+		vcpu->arch.IWKey.integrity_key[0] = xmm[0];
+		vcpu->arch.IWKey.integrity_key[1] = xmm[1];
+
+		loadiwkey(0);
+	}
+
+	return 0;
+}
+
 /* vmexit handler for just injecting a #UD exception
  *
  * ACRN doesn't support nested virtualization, the following VMExit will inject #UD
diff --git a/hypervisor/arch/x86/security.c b/hypervisor/arch/x86/security.c
index a065c19e0..b2f3daeec 100644
--- a/hypervisor/arch/x86/security.c
+++ b/hypervisor/arch/x86/security.c
@@ -162,10 +162,9 @@ void cpu_internal_buffers_clear(void)
 	}
 }
 
-#ifdef STACK_PROTECTOR
-static uint64_t get_random_value(void)
+uint64_t get_random_value(void)
 {
-	uint64_t random = 0UL;
+	uint64_t random;
 
 	asm volatile ("1: rdrand %%rax\n"
 		      "jnc 1b\n"
@@ -176,6 +175,7 @@ static uint64_t get_random_value(void)
 	return random;
 }
 
+#ifdef STACK_PROTECTOR
 void set_fs_base(void)
 {
 	struct stack_canary *psc = &get_cpu_var(stk_canary);
diff --git a/hypervisor/include/arch/x86/cpu.h b/hypervisor/include/arch/x86/cpu.h
index 31d70891d..19244196b 100644
--- a/hypervisor/include/arch/x86/cpu.h
+++ b/hypervisor/include/arch/x86/cpu.h
@@ -659,6 +659,12 @@ static inline void xrstors(const struct xsave_area *region_addr, uint64_t mask)
 			"memory");
 }
 
+static inline void loadiwkey(uint32_t eax)
+{
+	/* LOADIWKEY (F3 0F 38 DC /r) emitted as raw bytes for toolchains without Key Locker support */
+	asm volatile (".byte 0xf3, 0x0f, 0x38, 0xdc, 0xd1;" : : "a" (eax));
+}
+
 /*
  * stac/clac pair is used to access guest's memory protected by SMAP,
  * following below flow:
diff --git a/hypervisor/include/arch/x86/cpufeatures.h b/hypervisor/include/arch/x86/cpufeatures.h
index 2e9cc9238..d8447c096 100644
--- a/hypervisor/include/arch/x86/cpufeatures.h
+++ b/hypervisor/include/arch/x86/cpufeatures.h
@@ -35,6 +35,7 @@
 #define X86_FEATURE_XSAVE	((FEAT_1_ECX << 5U) + 26U)
 #define X86_FEATURE_OSXSAVE	((FEAT_1_ECX << 5U) + 27U)
 #define X86_FEATURE_AVX	((FEAT_1_ECX << 5U) + 28U)
+#define X86_FEATURE_RDRAND	((FEAT_1_ECX << 5U) + 30U)
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (EDX)*/
 #define X86_FEATURE_FPU	((FEAT_1_EDX << 5U) + 0U)
diff --git a/hypervisor/include/arch/x86/guest/vcpu.h b/hypervisor/include/arch/x86/guest/vcpu.h
index 4f181247f..45c4337a9 100644
--- a/hypervisor/include/arch/x86/guest/vcpu.h
+++ b/hypervisor/include/arch/x86/guest/vcpu.h
@@ -203,6 +203,13 @@ struct msr_store_area {
 	uint32_t count;	/* actual count of entries to be loaded/restored during VMEntry/VMExit */
 };
 
+struct iwkey {
+	/* 256-bit encryption key */
+	uint64_t encryption_key[4];
+	/* 128-bit integrity key */
+	uint64_t integrity_key[2];
+};
+
 struct acrn_vcpu_arch {
 	/* vmcs region for this vcpu, MUST be 4KB-aligned */
 	uint8_t vmcs[PAGE_SIZE];
@@ -261,6 +268,7 @@ struct acrn_vcpu_arch {
 	uint64_t eoi_exit_bitmap[EOI_EXIT_BITMAP_SIZE >> 6U];
 
 	/* Keylocker */
+	struct iwkey IWKey;
 	bool cr4_kl_enabled;
 } __aligned(PAGE_SIZE);
diff --git a/hypervisor/include/arch/x86/security.h b/hypervisor/include/arch/x86/security.h
index b2a158de1..2f54f1a58 100644
--- a/hypervisor/include/arch/x86/security.h
+++ b/hypervisor/include/arch/x86/security.h
@@ -22,6 +22,7 @@ void cpu_l1d_flush(void);
 bool check_cpu_security_cap(void);
 void cpu_internal_buffers_clear(void);
 bool is_ept_force_4k_ipage(void);
+uint64_t get_random_value(void);
 
 #ifdef STACK_PROTECTOR
 struct stack_canary {
diff --git a/hypervisor/include/arch/x86/vmx.h b/hypervisor/include/arch/x86/vmx.h
index a33ee2bd4..a5f1880ff 100644
--- a/hypervisor/include/arch/x86/vmx.h
+++ b/hypervisor/include/arch/x86/vmx.h
@@ -64,6 +64,10 @@
 #define VMX_XSS_EXITING_BITMAP_FULL	0x0000202CU
 #define VMX_XSS_EXITING_BITMAP_HIGH	0x0000202DU
+
+#define VMX_PROC_VM_EXEC_CONTROLS3_FULL	0x00002034U
+#define VMX_PROC_VM_EXEC_CONTROLS3_HIGH	0x00002035U
+
 /* 64-bit read-only data fields */
 #define VMX_GUEST_PHYSICAL_ADDR_FULL	0x00002400U
 #define VMX_GUEST_PHYSICAL_ADDR_HIGH	0x00002401U
@@ -263,6 +267,7 @@
 #define VMX_EXIT_REASON_PAGE_MODIFICATION_LOG_FULL	0x0000003EU
 #define VMX_EXIT_REASON_XSAVES	0x0000003FU
 #define VMX_EXIT_REASON_XRSTORS	0x00000040U
+#define VMX_EXIT_REASON_LOADIWKEY	0x00000045U
 
 /* VMX execution control bits (pin based) */
 #define VMX_PINBASED_CTLS_IRQ_EXIT	(1U<<0U)
@@ -312,6 +317,7 @@
 #define VMX_PROCBASED_CTLS2_RDSEED	(1U<<16U)
 #define VMX_PROCBASED_CTLS2_EPT_VE	(1U<<18U)
 #define VMX_PROCBASED_CTLS2_XSVE_XRSTR	(1U<<20U)
+#define VMX_PROCBASED_CTLS3_LOADIWKEY	(1U<<0U)
 
 /* MSR_IA32_VMX_EPT_VPID_CAP: EPT and VPID capability bits */
 #define VMX_EPT_EXECUTE_ONLY	(1U << 0U)
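
Note: the patch caches the guest's IWKey in vcpu->arch.IWKey but does not itself show a restore path (reloading the cached key into the physical CPU when a vCPU is scheduled back in). A minimal sketch of such a helper follows; load_iwkey() is hypothetical and not part of this patch, and saving/restoring the live xmm0-xmm2 contents around it is omitted for brevity. It mirrors the register layout used by loadiwkey_vmexit_handler() above: xmm0 carries the 128-bit integrity key, xmm1/xmm2 the 256-bit encryption key.

/* Hypothetical helper (not in this patch): reload a vCPU's cached IWKey
 * into the physical CPU. Assumes the caller preserves xmm0-xmm2 if their
 * previous contents still matter.
 */
static void load_iwkey(struct acrn_vcpu *vcpu)
{
	if (pcpu_has_cap(X86_FEATURE_KEYLOCKER)) {
		asm volatile ("movdqu %0, %%xmm0\n"
			      "movdqu %1, %%xmm1\n"
			      "movdqu %2, %%xmm2\n"
			      : : "m"(vcpu->arch.IWKey.integrity_key[0]),
				  "m"(vcpu->arch.IWKey.encryption_key[0]),
				  "m"(vcpu->arch.IWKey.encryption_key[2])
			      : "xmm0", "xmm1", "xmm2");
		loadiwkey(0U);	/* EAX = 0: software-specified key, backup allowed */
	}
}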
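For reference, the guest-side sequence that this handler emulates looks roughly like the sketch below (illustrative guest code, assumed rather than taken from this patch). The guest loads the integrity key into xmm0 and the encryption key into xmm1/xmm2, then executes LOADIWKEY with EAX = 0; any nonzero EAX, i.e. a request for the no-backup or hardware-randomization options, takes the #GP path in loadiwkey_vmexit_handler().

/* Illustrative guest-side LOADIWKEY invocation; not part of this patch. */
static void guest_load_iwkey(const uint64_t ikey[2], const uint64_t ekey[4])
{
	asm volatile ("movdqu %0, %%xmm0\n"
		      "movdqu %1, %%xmm1\n"
		      "movdqu %2, %%xmm2\n"
		      ".byte 0xf3, 0x0f, 0x38, 0xdc, 0xd1\n"	/* LOADIWKEY */
		      : : "m"(ikey[0]), "m"(ekey[0]), "m"(ekey[2]), "a"(0U)
		      : "xmm0", "xmm1", "xmm2");
}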