mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-05-03 22:17:03 +00:00
KeyLocker is a new security feature available in new Intel CPUs that protects data-encryption keys for the Advanced Encryption Standard (AES) algorithm. These keys are more valuable than what they guard. If stolen once, the key can be used repeatedly, even on another system and even after the vulnerability is closed. KeyLocker also introduces a CPU-internal wrapping key (IWKey), which is a key-encryption key used to wrap AES keys into handles. While the IWKey is inaccessible to software, randomizing its value at boot time helps make it unpredictable. KeyLocker usage: - The new "ENCODEKEY" instructions take the original key as input and return a HANDLE encrypted by an internal wrapping key (IWKey, initialized by the "LOADIWKEY" instruction) - Software can then delete the original key from memory - Early in boot/software, it is less likely that a vulnerability exists that allows stealing the original key - Later encrypt/decrypt operations can use the HANDLE through the new AES KeyLocker instructions - Note: * Software can use the original key without knowing it (by using the HANDLE) * A HANDLE cannot be used on other systems or after a warm/cold reset * The IWKey cannot be read from the CPU after it is loaded (this is the nature of this feature), and only one copy of the IWKey exists inside the CPU. The virtualization implementation of KeyLocker on ACRN is: - Each vCPU has a 'struct iwkey' to store its IWKey in struct acrn_vcpu_arch. - At initialization, every vCPU is created with a random IWKey. - The hypervisor traps the execution of LOADIWKEY (via the 'LOADIWKEY exiting' VM-execution control) by a vCPU to capture and save the IWKey if the guest sets a new IWKey. Randomization of LOADIWKEY is not supported (CPUID is emulated to disable it), as the hypervisor cannot capture and save a randomly generated IWKey. From the KeyLocker spec: "Note that a VMM may wish to enumerate no support for HW random IWKeys to the guest (i.e. enumerate CPUID.19H:ECX[1] as 0) as such IWKeys cannot be easily context switched. 
A guest ENCODEKEY will return the type of IWKey used (IWKey.KeySource) and thus will notice if a VMM virtualized a HW random IWKey with a SW specified IWKey." - In context_switch_in() of each vCPU, hypervisor loads that vCPU's IWKey into pCPU by LOADIWKEY instruction. - There is an assumption that ACRN hypervisor will never use the KeyLocker feature itself. This patch implements the vCPU's IWKey management and the next patch implements host context save/restore IWKey logic. Tracked-On: #5695 Signed-off-by: Shuo A Liu <shuo.a.liu@intel.com> Acked-by: Eddie Dong <eddie.dong@intel.com>
107 lines
4.9 KiB
C
107 lines
4.9 KiB
C
/*
|
|
* Copyright (C) 2018 Intel Corporation. All rights reserved.
|
|
*
|
|
* SPDX-License-Identifier: BSD-3-Clause
|
|
*/
|
|
|
|
#ifndef CPUFEATURES_H
|
|
#define CPUFEATURES_H
|
|
|
|
/* Intel-defined CPU features, CPUID level 0x00000001 (ECX)*/
|
|
#define X86_FEATURE_SSE3 ((FEAT_1_ECX << 5U) + 0U)
|
|
#define X86_FEATURE_PCLMUL ((FEAT_1_ECX << 5U) + 1U)
|
|
#define X86_FEATURE_DTES64 ((FEAT_1_ECX << 5U) + 2U)
|
|
#define X86_FEATURE_MONITOR ((FEAT_1_ECX << 5U) + 3U)
|
|
#define X86_FEATURE_DS_CPL ((FEAT_1_ECX << 5U) + 4U)
|
|
#define X86_FEATURE_VMX ((FEAT_1_ECX << 5U) + 5U)
|
|
#define X86_FEATURE_SMX ((FEAT_1_ECX << 5U) + 6U)
|
|
#define X86_FEATURE_EST ((FEAT_1_ECX << 5U) + 7U)
|
|
#define X86_FEATURE_TM2 ((FEAT_1_ECX << 5U) + 8U)
|
|
#define X86_FEATURE_SSSE3 ((FEAT_1_ECX << 5U) + 9U)
|
|
#define X86_FEATURE_CID ((FEAT_1_ECX << 5U) + 10U)
|
|
#define X86_FEATURE_FMA ((FEAT_1_ECX << 5U) + 12U)
|
|
#define X86_FEATURE_CX16 ((FEAT_1_ECX << 5U) + 13U)
|
|
#define X86_FEATURE_ETPRD ((FEAT_1_ECX << 5U) + 14U)
|
|
#define X86_FEATURE_PDCM ((FEAT_1_ECX << 5U) + 15U)
|
|
#define X86_FEATURE_PCID ((FEAT_1_ECX << 5U) + 17U)
|
|
#define X86_FEATURE_DCA ((FEAT_1_ECX << 5U) + 18U)
|
|
#define X86_FEATURE_SSE4_1 ((FEAT_1_ECX << 5U) + 19U)
|
|
#define X86_FEATURE_SSE4_2 ((FEAT_1_ECX << 5U) + 20U)
|
|
#define X86_FEATURE_X2APIC ((FEAT_1_ECX << 5U) + 21U)
|
|
#define X86_FEATURE_MOVBE ((FEAT_1_ECX << 5U) + 22U)
|
|
#define X86_FEATURE_POPCNT ((FEAT_1_ECX << 5U) + 23U)
|
|
#define X86_FEATURE_TSC_DEADLINE ((FEAT_1_ECX << 5U) + 24U)
|
|
#define X86_FEATURE_AES ((FEAT_1_ECX << 5U) + 25U)
|
|
#define X86_FEATURE_XSAVE ((FEAT_1_ECX << 5U) + 26U)
|
|
#define X86_FEATURE_OSXSAVE ((FEAT_1_ECX << 5U) + 27U)
|
|
#define X86_FEATURE_AVX ((FEAT_1_ECX << 5U) + 28U)
|
|
#define X86_FEATURE_RDRAND ((FEAT_1_ECX << 5U) + 30U)
|
|
|
|
/* Intel-defined CPU features, CPUID level 0x00000001 (EDX)*/
|
|
#define X86_FEATURE_FPU ((FEAT_1_EDX << 5U) + 0U)
|
|
#define X86_FEATURE_VME ((FEAT_1_EDX << 5U) + 1U)
|
|
#define X86_FEATURE_DE ((FEAT_1_EDX << 5U) + 2U)
|
|
#define X86_FEATURE_PSE ((FEAT_1_EDX << 5U) + 3U)
|
|
#define X86_FEATURE_TSC ((FEAT_1_EDX << 5U) + 4U)
|
|
#define X86_FEATURE_MSR ((FEAT_1_EDX << 5U) + 5U)
|
|
#define X86_FEATURE_PAE ((FEAT_1_EDX << 5U) + 6U)
|
|
#define X86_FEATURE_MCE ((FEAT_1_EDX << 5U) + 7U)
|
|
#define X86_FEATURE_CX8 ((FEAT_1_EDX << 5U) + 8U)
|
|
#define X86_FEATURE_APIC ((FEAT_1_EDX << 5U) + 9U)
|
|
#define X86_FEATURE_SEP ((FEAT_1_EDX << 5U) + 11U)
|
|
#define X86_FEATURE_MTRR ((FEAT_1_EDX << 5U) + 12U)
|
|
#define X86_FEATURE_PGE ((FEAT_1_EDX << 5U) + 13U)
|
|
#define X86_FEATURE_MCA ((FEAT_1_EDX << 5U) + 14U)
|
|
#define X86_FEATURE_CMOV ((FEAT_1_EDX << 5U) + 15U)
|
|
#define X86_FEATURE_PAT ((FEAT_1_EDX << 5U) + 16U)
|
|
#define X86_FEATURE_PSE36 ((FEAT_1_EDX << 5U) + 17U)
|
|
#define X86_FEATURE_PSN ((FEAT_1_EDX << 5U) + 18U)
|
|
#define X86_FEATURE_CLF ((FEAT_1_EDX << 5U) + 19U)
|
|
#define X86_FEATURE_DTES ((FEAT_1_EDX << 5U) + 21U)
|
|
#define X86_FEATURE_ACPI ((FEAT_1_EDX << 5U) + 22U)
|
|
#define X86_FEATURE_MMX ((FEAT_1_EDX << 5U) + 23U)
|
|
#define X86_FEATURE_FXSR ((FEAT_1_EDX << 5U) + 24U)
|
|
#define X86_FEATURE_SSE ((FEAT_1_EDX << 5U) + 25U)
|
|
#define X86_FEATURE_SSE2 ((FEAT_1_EDX << 5U) + 26U)
|
|
#define X86_FEATURE_SS ((FEAT_1_EDX << 5U) + 27U)
|
|
#define X86_FEATURE_HTT ((FEAT_1_EDX << 5U) + 28U)
|
|
#define X86_FEATURE_TM1 ((FEAT_1_EDX << 5U) + 29U)
|
|
#define X86_FEATURE_IA64 ((FEAT_1_EDX << 5U) + 30U)
|
|
#define X86_FEATURE_PBE ((FEAT_1_EDX << 5U) + 31U)
|
|
|
|
/* Intel-defined CPU features, CPUID level 0x00000007 (EBX)*/
|
|
#define X86_FEATURE_TSC_ADJ ((FEAT_7_0_EBX << 5U) + 1U)
|
|
#define X86_FEATURE_SGX ((FEAT_7_0_EBX << 5U) + 2U)
|
|
#define X86_FEATURE_SMEP ((FEAT_7_0_EBX << 5U) + 7U)
|
|
#define X86_FEATURE_ERMS ((FEAT_7_0_EBX << 5U) + 9U)
|
|
#define X86_FEATURE_INVPCID ((FEAT_7_0_EBX << 5U) + 10U)
|
|
#define X86_FEATURE_RDT_A ((FEAT_7_0_EBX << 5U) + 15U)
|
|
#define X86_FEATURE_SMAP ((FEAT_7_0_EBX << 5U) + 20U)
|
|
#define X86_FEATURE_CLFLUSHOPT ((FEAT_7_0_EBX << 5U) + 23U)
|
|
|
|
/* Intel-defined CPU features, CPUID level 0x00000007 (ECX)*/
|
|
#define X86_FEATURE_KEYLOCKER ((FEAT_7_0_ECX << 5U) + 23U)
|
|
|
|
/* Intel-defined CPU features, CPUID level 0x00000007 (EDX)*/
|
|
#define X86_FEATURE_MDS_CLEAR ((FEAT_7_0_EDX << 5U) + 10U)
|
|
#define X86_FEATURE_IBRS_IBPB ((FEAT_7_0_EDX << 5U) + 26U)
|
|
#define X86_FEATURE_STIBP ((FEAT_7_0_EDX << 5U) + 27U)
|
|
#define X86_FEATURE_L1D_FLUSH ((FEAT_7_0_EDX << 5U) + 28U)
|
|
#define X86_FEATURE_ARCH_CAP ((FEAT_7_0_EDX << 5U) + 29U)
|
|
#define X86_FEATURE_CORE_CAP ((FEAT_7_0_EDX << 5U) + 30U)
|
|
#define X86_FEATURE_SSBD ((FEAT_7_0_EDX << 5U) + 31U)
|
|
|
|
/* Intel-defined CPU features, CPUID level 0x80000001 (EDX)*/
|
|
#define X86_FEATURE_NX ((FEAT_8000_0001_EDX << 5U) + 20U)
|
|
#define X86_FEATURE_PAGE1GB ((FEAT_8000_0001_EDX << 5U) + 26U)
|
|
#define X86_FEATURE_LM ((FEAT_8000_0001_EDX << 5U) + 29U)
|
|
|
|
/* Intel-defined CPU features, CPUID level 0x80000007 (EDX)*/
|
|
#define X86_FEATURE_INVA_TSC ((FEAT_8000_0007_EDX << 5U) + 8U)
|
|
|
|
/* Intel-defined CPU features, CPUID level 0x0000000D, sub 0x1 */
|
|
#define X86_FEATURE_COMPACTION_EXT ((FEAT_D_1_EAX << 5U) + 1U)
|
|
#define X86_FEATURE_XSAVES ((FEAT_D_1_EAX << 5U) + 3U)
|
|
|
|
#endif /* CPUFEATURES_H */
|