hv: add hardware_detect_support to detect which hardware we support

For now, add only some basic feature/capability detection (not exhaustive).
vAPIC detection is intentionally not added here: if vAPIC support were made
mandatory, the fallback code paths for vAPIC-incapable hardware (such as the
MMIO APIC read/write handling) would have to be removed.

Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Li, Fei1 2018-04-17 11:40:01 +08:00 committed by lijinxia
parent f053288fca
commit deb1c29b59
6 changed files with 114 additions and 38 deletions

View File

@ -126,7 +126,6 @@ static uint64_t get_address_mask(uint8_t limit)
static void get_cpu_capabilities(void) static void get_cpu_capabilities(void)
{ {
uint32_t eax, unused; uint32_t eax, unused;
uint32_t max_extended_function_idx;
uint32_t family, model; uint32_t family, model;
cpuid(CPUID_FEATURES, &eax, &unused, cpuid(CPUID_FEATURES, &eax, &unused,
@ -149,28 +148,28 @@ static void get_cpu_capabilities(void)
&boot_cpu_data.cpuid_leaves[FEAT_7_0_EDX]); &boot_cpu_data.cpuid_leaves[FEAT_7_0_EDX]);
cpuid(CPUID_MAX_EXTENDED_FUNCTION, cpuid(CPUID_MAX_EXTENDED_FUNCTION,
&max_extended_function_idx, &boot_cpu_data.extended_cpuid_level,
&unused, &unused, &unused); &unused, &unused, &unused);
boot_cpu_data.cpuid_leaves[FEAT_8000_0000_EAX] =
max_extended_function_idx;
if (max_extended_function_idx < CPUID_EXTEND_ADDRESS_SIZE) { if (boot_cpu_data.extended_cpuid_level >= CPUID_EXTEND_FUNCTION_1)
panic("CPU w/o CPUID.80000008H is not supported"); cpuid(CPUID_EXTEND_FUNCTION_1, &unused, &unused,
&boot_cpu_data.cpuid_leaves[FEAT_8000_0001_ECX],
&boot_cpu_data.cpuid_leaves[FEAT_8000_0001_EDX]);
if (boot_cpu_data.extended_cpuid_level >= CPUID_EXTEND_ADDRESS_SIZE) {
cpuid(CPUID_EXTEND_ADDRESS_SIZE, &eax,
&boot_cpu_data.cpuid_leaves[FEAT_8000_0008_EBX],
&unused, &unused);
/* EAX bits 07-00: #Physical Address Bits
* bits 15-08: #Linear Address Bits
*/
boot_cpu_data.x86_virt_bits = (eax >> 8) & 0xff;
boot_cpu_data.x86_phys_bits = eax & 0xff;
boot_cpu_data.physical_address_mask =
get_address_mask(boot_cpu_data.x86_phys_bits);
} }
cpuid(CPUID_EXTEND_FUNCTION_1, &unused, &unused,
&boot_cpu_data.cpuid_leaves[FEAT_8000_0001_ECX],
&boot_cpu_data.cpuid_leaves[FEAT_8000_0001_EDX]);
cpuid(CPUID_EXTEND_ADDRESS_SIZE,
&eax, &unused, &unused, &unused);
boot_cpu_data.cpuid_leaves[FEAT_8000_0008_EAX] = eax;
/* EAX bits 07-00: #Physical Address Bits
* bits 15-08: #Linear Address Bits
*/
boot_cpu_data.physical_address_mask =
get_address_mask(eax & 0xff);
/* For speculation defence. /* For speculation defence.
* The default way is to set IBRS at vmexit and then do IBPB at vcpu * The default way is to set IBRS at vmexit and then do IBPB at vcpu
* context switch(ibrs_type == IBRS_RAW). * context switch(ibrs_type == IBRS_RAW).
@ -195,6 +194,64 @@ static void get_cpu_capabilities(void)
#endif #endif
} }
/*
 * Basic hardware capability check.
 * Additional required features/capabilities should be
 * added here later.
 */
static int hardware_detect_support(void)
{
int ret;
/* Long Mode (x86-64, 64-bit support) */
if (!cpu_has_cap(X86_FEATURE_LM)) {
pr_fatal("%s, LM not supported\n", __func__);
return -ENODEV;
}
if ((boot_cpu_data.x86_phys_bits == 0) ||
(boot_cpu_data.x86_virt_bits == 0)) {
pr_fatal("%s, can't detect Linear/Physical Address size\n",
__func__);
return -ENODEV;
}
/* lapic TSC deadline timer */
if (!cpu_has_cap(X86_FEATURE_TSC_DEADLINE)) {
pr_fatal("%s, TSC deadline not supported\n", __func__);
return -ENODEV;
}
/* Execute Disable */
if (!cpu_has_cap(X86_FEATURE_NX)) {
pr_fatal("%s, NX not supported\n", __func__);
return -ENODEV;
}
/* Supervisor-Mode Execution Prevention */
if (!cpu_has_cap(X86_FEATURE_SMEP)) {
pr_fatal("%s, SMEP not supported\n", __func__);
return -ENODEV;
}
/* Supervisor-Mode Access Prevention */
if (!cpu_has_cap(X86_FEATURE_SMAP)) {
pr_fatal("%s, SMAP not supported\n", __func__);
return -ENODEV;
}
if (!cpu_has_cap(X86_FEATURE_VMX)) {
pr_fatal("%s, vmx not supported\n", __func__);
return -ENODEV;
}
ret = check_vmx_mmu_cap();
if (ret)
return ret;
printf("hardware support HV\n");
return 0;
}
static void alloc_phy_cpu_data(int pcpu_num) static void alloc_phy_cpu_data(int pcpu_num)
{ {
phy_cpu_num = pcpu_num; phy_cpu_num = pcpu_num;
@ -458,6 +515,11 @@ void bsp_boot_init(void)
pr_dbg("Core %d is up", CPU_BOOT_ID); pr_dbg("Core %d is up", CPU_BOOT_ID);
if (hardware_detect_support() != 0) {
pr_fatal("hardware not support!\n");
return;
}
/* Warn for security feature not ready */ /* Warn for security feature not ready */
if (!cpu_has_cap(X86_FEATURE_IBRS_IBPB) && if (!cpu_has_cap(X86_FEATURE_IBRS_IBPB) &&
!cpu_has_cap(X86_FEATURE_STIBP)) { !cpu_has_cap(X86_FEATURE_STIBP)) {
@ -492,7 +554,10 @@ void bsp_boot_init(void)
ASSERT(get_cpu_id() == CPU_BOOT_ID, ""); ASSERT(get_cpu_id() == CPU_BOOT_ID, "");
init_iommu(); if (init_iommu() != 0) {
pr_fatal("%s, init iommu failed\n", __func__);
return;
}
console_setup_timer(); console_setup_timer();

View File

@ -94,7 +94,7 @@ static inline bool cpu_has_vmx_vpid_cap(uint32_t bit_mask)
return !!(vmx_caps.vpid & bit_mask); return !!(vmx_caps.vpid & bit_mask);
} }
static void check_mmu_capability(void) int check_vmx_mmu_cap(void)
{ {
uint64_t val; uint64_t val;
@ -103,8 +103,17 @@ static void check_mmu_capability(void)
vmx_caps.ept = (uint32_t) val; vmx_caps.ept = (uint32_t) val;
vmx_caps.vpid = (uint32_t) (val >> 32); vmx_caps.vpid = (uint32_t) (val >> 32);
if (!cpu_has_vmx_ept_cap(VMX_EPT_INVEPT)) if (!cpu_has_vmx_ept_cap(VMX_EPT_INVEPT)) {
panic("invept must be supported"); pr_fatal("%s, invept not supported\n", __func__);
return -ENODEV;
}
if (!cpu_has_vmx_vpid_cap(VMX_VPID_INVVPID)) {
pr_fatal("%s, invvpid not supported\n", __func__);
return -ENODEV;
}
return 0;
} }
void invept(struct vcpu *vcpu) void invept(struct vcpu *vcpu)
@ -495,8 +504,6 @@ void init_paging(void)
pr_dbg("HV MMU Initialization"); pr_dbg("HV MMU Initialization");
check_mmu_capability();
/* Allocate memory for Hypervisor PML4 table */ /* Allocate memory for Hypervisor PML4 table */
mmu_pml4_addr = alloc_paging_struct(); mmu_pml4_addr = alloc_paging_struct();

View File

@ -150,10 +150,6 @@ int hv_main(int cpu_id)
ASSERT((uint64_t) cpu_id == get_cpu_id(), ASSERT((uint64_t) cpu_id == get_cpu_id(),
"cpu_id/tsc_aux mismatch"); "cpu_id/tsc_aux mismatch");
/* Check if virtualization extensions are supported */
ret = cpu_has_cap(X86_FEATURE_VMX);
ASSERT(ret == 1, "VMX not supported!");
/* Enable virtualization extensions */ /* Enable virtualization extensions */
ret = exec_vmxon_instr(); ret = exec_vmxon_instr();
ASSERT(ret == 0, "Unable to enable VMX!"); ASSERT(ret == 0, "Unable to enable VMX!");

View File

@ -225,14 +225,13 @@ extern uint64_t pcpu_active_bitmap;
/* CPUID feature words */ /* CPUID feature words */
enum feature_word { enum feature_word {
FEAT_1_ECX = 0, /* CPUID[1].ECX */ FEAT_1_ECX = 0, /* CPUID[1].ECX */
FEAT_1_EDX = 1, /* CPUID[1].EDX */ FEAT_1_EDX, /* CPUID[1].EDX */
FEAT_7_0_EBX = 2, /* CPUID[EAX=7,ECX=0].EBX */ FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */
FEAT_7_0_ECX = 3, /* CPUID[EAX=7,ECX=0].ECX */ FEAT_7_0_ECX, /* CPUID[EAX=7,ECX=0].ECX */
FEAT_7_0_EDX = 4, /* CPUID[EAX=7,ECX=0].EDX */ FEAT_7_0_EDX, /* CPUID[EAX=7,ECX=0].EDX */
FEAT_8000_0000_EAX = 5, /* CPUID[8000_0000].EAX */ FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */
FEAT_8000_0001_ECX = 6, /* CPUID[8000_0001].ECX */ FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */
FEAT_8000_0001_EDX = 7, /* CPUID[8000_0001].EDX */ FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */
FEAT_8000_0008_EAX = 8, /* CPUID[8000_0008].EAX */
FEATURE_WORDS, FEATURE_WORDS,
}; };
@ -243,6 +242,9 @@ struct cpu_state_info {
struct cpuinfo_x86 { struct cpuinfo_x86 {
uint8_t x86, x86_model; uint8_t x86, x86_model;
uint8_t x86_virt_bits;
uint8_t x86_phys_bits;
uint32_t extended_cpuid_level;
uint64_t physical_address_mask; uint64_t physical_address_mask;
uint32_t cpuid_leaves[FEATURE_WORDS]; uint32_t cpuid_leaves[FEATURE_WORDS];
char model_name[64]; char model_name[64];

View File

@ -54,6 +54,7 @@
#define X86_FEATURE_x2APIC ((FEAT_1_ECX << 5) + 21) #define X86_FEATURE_x2APIC ((FEAT_1_ECX << 5) + 21)
#define X86_FEATURE_MOVBE ((FEAT_1_ECX << 5) + 22) #define X86_FEATURE_MOVBE ((FEAT_1_ECX << 5) + 22)
#define X86_FEATURE_POPCNT ((FEAT_1_ECX << 5) + 23) #define X86_FEATURE_POPCNT ((FEAT_1_ECX << 5) + 23)
#define X86_FEATURE_TSC_DEADLINE ((FEAT_1_ECX << 5) + 24)
#define X86_FEATURE_AES ((FEAT_1_ECX << 5) + 25) #define X86_FEATURE_AES ((FEAT_1_ECX << 5) + 25)
#define X86_FEATURE_XSAVE ((FEAT_1_ECX << 5) + 26) #define X86_FEATURE_XSAVE ((FEAT_1_ECX << 5) + 26)
#define X86_FEATURE_OSXSAVE ((FEAT_1_ECX << 5) + 27) #define X86_FEATURE_OSXSAVE ((FEAT_1_ECX << 5) + 27)
@ -93,13 +94,17 @@
/* Intel-defined CPU features, CPUID level 0x00000007 (EBX)*/ /* Intel-defined CPU features, CPUID level 0x00000007 (EBX)*/
#define X86_FEATURE_TSC_ADJ ((FEAT_7_0_EBX << 5) + 1) #define X86_FEATURE_TSC_ADJ ((FEAT_7_0_EBX << 5) + 1)
#define X86_FEATURE_SMEP ((FEAT_7_0_EBX << 5) + 7)
#define X86_FEATURE_INVPCID ((FEAT_7_0_EBX << 5) + 10) #define X86_FEATURE_INVPCID ((FEAT_7_0_EBX << 5) + 10)
#define X86_FEATURE_SMAP ((FEAT_7_0_EBX << 5) + 20)
/* Intel-defined CPU features, CPUID level 0x00000007 (EDX)*/ /* Intel-defined CPU features, CPUID level 0x00000007 (EDX)*/
#define X86_FEATURE_IBRS_IBPB ((FEAT_7_0_EDX << 5) + 26) #define X86_FEATURE_IBRS_IBPB ((FEAT_7_0_EDX << 5) + 26)
#define X86_FEATURE_STIBP ((FEAT_7_0_EDX << 5) + 27) #define X86_FEATURE_STIBP ((FEAT_7_0_EDX << 5) + 27)
/* Intel-defined CPU features, CPUID level 0x80000001 (EDX)*/ /* Intel-defined CPU features, CPUID level 0x80000001 (EDX)*/
#define X86_FEATURE_NX ((FEAT_8000_0001_EDX << 5) + 20)
#define X86_FEATURE_PAGE1GB ((FEAT_8000_0001_EDX << 5) + 26) #define X86_FEATURE_PAGE1GB ((FEAT_8000_0001_EDX << 5) + 26)
#define X86_FEATURE_LM ((FEAT_8000_0001_EDX << 5) + 29)
#endif /*__X86_CPUFEATURES_H__*/ #endif /*__X86_CPUFEATURES_H__*/

View File

@ -170,7 +170,7 @@
#define IA32E_NUM_ENTRIES 512 #define IA32E_NUM_ENTRIES 512
#define IA32E_INDEX_MASK (uint64_t)(IA32E_NUM_ENTRIES - 1) #define IA32E_INDEX_MASK (uint64_t)(IA32E_NUM_ENTRIES - 1)
#define IA32E_REF_MASK \ #define IA32E_REF_MASK \
(boot_cpu_data.physical_address_mask) (boot_cpu_data.physical_address_mask)
#define IA32E_FIRST_BLOCK_INDEX 1 #define IA32E_FIRST_BLOCK_INDEX 1
/* Macro to get PML4 index given an address */ /* Macro to get PML4 index given an address */
@ -326,6 +326,7 @@ int unmap_mem(struct map_params *map_params, void *paddr, void *vaddr,
uint64_t size, uint32_t flags); uint64_t size, uint32_t flags);
int modify_mem(struct map_params *map_params, void *paddr, void *vaddr, int modify_mem(struct map_params *map_params, void *paddr, void *vaddr,
uint64_t size, uint32_t flags); uint64_t size, uint32_t flags);
int check_vmx_mmu_cap(void);
void invept(struct vcpu *vcpu); void invept(struct vcpu *vcpu);
bool check_continuous_hpa(struct vm *vm, uint64_t gpa, uint64_t size); bool check_continuous_hpa(struct vm *vm, uint64_t gpa, uint64_t size);
int obtain_last_page_table_entry(struct map_params *map_params, int obtain_last_page_table_entry(struct map_params *map_params,