Mirror of https://github.com/projectacrn/acrn-hypervisor.git
hv: vmx_capability: add cpu_has_vmx_ept/vpid_cap API
Refine and simplify the vmx_capability API definition.

Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
commit 80d194cfb5
parent d7d2ef8c88
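For context: the change collapses the per-feature booleans of struct mm_capability into two raw capability words read from MSR_IA32_VMX_EPT_VPID_CAP (EPT bits in the low dword, VPID bits in the high dword) and exposes them through cpu_has_vmx_ept_cap()/cpu_has_vmx_vpid_cap(). A minimal standalone sketch of the caller-side pattern; the sample capability value below is made up rather than read from the MSR:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Capability bit masks as introduced in the VMX header by this commit. */
#define VMX_EPT_1GB_PAGE               (1U << 17)
#define VMX_EPT_INVEPT                 (1U << 20)
#define VMX_EPT_INVEPT_SINGLE_CONTEXT  (1U << 25)

/* Packed capability words; filled from MSR_IA32_VMX_EPT_VPID_CAP at init. */
static struct vmx_capability {
        uint32_t ept;
        uint32_t vpid;
} vmx_caps;

static inline bool cpu_has_vmx_ept_cap(uint32_t bit_mask)
{
        return !!(vmx_caps.ept & bit_mask);
}

int main(void)
{
        /* Hypothetical value standing in for the low dword of the MSR. */
        vmx_caps.ept = VMX_EPT_INVEPT | VMX_EPT_INVEPT_SINGLE_CONTEXT;

        if (cpu_has_vmx_ept_cap(VMX_EPT_INVEPT_SINGLE_CONTEXT))
                printf("single-context INVEPT supported\n");
        if (!cpu_has_vmx_ept_cap(VMX_EPT_1GB_PAGE))
                printf("EPT 1GB pages not reported\n");
        return 0;
}

Call sites then test the VMX_EPT_*/VMX_VPID_* masks directly instead of going through one boolean field per feature, which is what the hunks below do.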
@@ -44,19 +44,15 @@ enum mem_map_request_type {
         PAGING_REQUEST_TYPE_UNKNOWN,
 };
 
-struct mm_capability {
-        bool ept_x_only_supported;
-        /* EPT and MMU 1-GByte page supported flag */
-        bool ept_1gb_page_supported;
-        bool invept_supported;
-        bool invept_single_context_supported;
-        bool invept_global_context_supported;
-        bool invvpid_supported;
-        bool invvpid_single_context_supported;
-        bool invvpid_global_context_supported;
+static struct vmx_capability {
+        uint32_t ept;
+        uint32_t vpid;
+} vmx_caps;
+
+static struct mm_capability {
+        /* MMU 1-GByte page supported flag */
         bool mmu_1gb_page_supported;
-};
-static struct mm_capability mm_caps;
+} mm_caps;
 
 #define INVEPT_TYPE_SINGLE_CONTEXT      1UL
 #define INVEPT_TYPE_ALL_CONTEXTS        2UL
@@ -93,31 +89,25 @@ static inline void inv_tlb_one_page(void *addr)
         asm volatile ("invlpg (%0)" : : "r" (addr) : "memory");
 }
 
+static inline bool cpu_has_vmx_ept_cap(uint32_t bit_mask)
+{
+        return !!(vmx_caps.ept & bit_mask);
+}
+
+static inline bool cpu_has_vmx_vpid_cap(uint32_t bit_mask)
+{
+        return !!(vmx_caps.vpid & bit_mask);
+}
+
 static void check_mmu_capability(void)
 {
         uint64_t val;
         uint32_t eax, ebx, ecx, edx;
 
-        memset(&mm_caps, 0, sizeof(struct mm_capability));
-
         /* Read the MSR register of EPT and VPID Capability - SDM A.10 */
         val = msr_read(MSR_IA32_VMX_EPT_VPID_CAP);
-        mm_caps.ept_x_only_supported = (val & MSR_VMX_EPT_X_ONLY)
-                                        ? (true) : (false);
-        mm_caps.ept_1gb_page_supported = (val & MSR_VMX_EPT_VPID_CAP_1GB)
-                                        ? (true) : (false);
-        mm_caps.invept_supported =
-                (val & MSR_VMX_INVEPT) ? (true) : (false);
-        mm_caps.invept_single_context_supported =
-                (val & MSR_VMX_INVEPT_SINGLE_CONTEXT) ? (true) : (false);
-        mm_caps.invept_global_context_supported =
-                (val & MSR_VMX_INVEPT_GLOBAL_CONTEXT) ? (true) : (false);
-        mm_caps.invvpid_supported =
-                (val & MSR_VMX_INVVPID) ? (true) : (false);
-        mm_caps.invvpid_single_context_supported =
-                (val & MSR_VMX_INVVPID_SINGLE_CONTEXT) ? (true) : (false);
-        mm_caps.invvpid_global_context_supported =
-                (val & MSR_VMX_INVVPID_GLOBAL_CONTEXT) ? (true) : (false);
+        vmx_caps.ept = (uint32_t) val;
+        vmx_caps.vpid = (uint32_t) (val >> 32);
 
         /* Read CPUID to check if PAGE1GB is supported
          * SDM 4.1.4 If CPUID.80000001H:EDX.Page1GB[bit26]=1,
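Why check_mmu_capability() can simply split the MSR value: in IA32_VMX_EPT_VPID_CAP (SDM Appendix A.10) the EPT-related capability bits occupy the low 32 bits and the VPID-related bits (32..63) the high 32 bits, which is exactly what vmx_caps.ept and vmx_caps.vpid capture. A small sketch of the split, using a hypothetical MSR value in place of msr_read():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical IA32_VMX_EPT_VPID_CAP value: bit 20 (INVEPT) and
         * bit 41 (single-context INVVPID) set. */
        uint64_t val = (1ULL << 20) | (1ULL << 41);

        uint32_t ept  = (uint32_t)val;         /* low dword: EPT caps   */
        uint32_t vpid = (uint32_t)(val >> 32); /* high dword: VPID caps */

        printf("ept=0x%08x vpid=0x%08x\n", ept, vpid);
        /* MSR bit 41 shows up as bit (41 - 32) = 9 of the vpid word. */
        printf("invvpid single-context: %d\n", (vpid >> 9) & 1);
        return 0;
}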
@@ -127,32 +117,15 @@ static void check_mmu_capability(void)
         mm_caps.mmu_1gb_page_supported = (edx & CPUID_EDX_PAGE1GB) ?
                                 (true) : (false);
 
-        if (!mm_caps.invept_supported)
+        if (!cpu_has_vmx_ept_cap(VMX_EPT_INVEPT))
                 panic("invept must be supported");
 }
 
-static inline bool check_ept_x_only_support(void)
-{
-        return mm_caps.ept_x_only_supported;
-}
-
-static inline bool check_invept_single_support(void)
-{
-        return mm_caps.invept_supported &&
-                mm_caps.invept_single_context_supported;
-}
-
-static inline bool check_invept_global_support(void)
-{
-        return mm_caps.invept_supported &&
-                mm_caps.invept_global_context_supported;
-}
-
 void invept(struct vcpu *vcpu)
 {
         struct invept_desc desc = {0};
 
-        if (check_invept_single_support()) {
+        if (cpu_has_vmx_ept_cap(VMX_EPT_INVEPT_SINGLE_CONTEXT)) {
                 desc.eptp = vcpu->vm->arch_vm.nworld_eptp | (3 << 3) | 6;
                 _invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
                 if (vcpu->vm->sworld_control.sworld_enabled) {
@@ -161,7 +134,7 @@ void invept(struct vcpu *vcpu)
                         _invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
 
                 }
-        } else if (check_invept_global_support())
+        } else if (cpu_has_vmx_ept_cap(VMX_EPT_INVEPT_GLOBAL_CONTEXT))
                 _invept(INVEPT_TYPE_ALL_CONTEXTS, desc);
 }
 
@@ -170,7 +143,7 @@ static bool check_mmu_1gb_support(struct map_params *map_params)
         bool status = false;
 
         if (map_params->page_table_type == PTT_EPT)
-                status = mm_caps.ept_1gb_page_supported;
+                status = cpu_has_vmx_ept_cap(VMX_EPT_1GB_PAGE);
         else
                 status = mm_caps.mmu_1gb_page_supported;
         return status;
@@ -191,7 +164,7 @@ static inline uint32_t check_page_table_present(int page_table_type,
                 if ((table_entry == IA32E_EPT_W_BIT) ||
                     (table_entry == (IA32E_EPT_W_BIT | IA32E_EPT_X_BIT)) ||
                     ((table_entry == IA32E_EPT_X_BIT) &&
-                    !check_ept_x_only_support()))
+                    !cpu_has_vmx_ept_cap(VMX_EPT_EXECUTE_ONLY)))
                         return PT_MISCFG_PRESENT;
         } else {
                 table_entry &= (IA32E_COMM_P_BIT);
@@ -905,7 +878,7 @@ static uint64_t break_page_table(struct map_params *map_params, void *paddr,
                  * current page size, obtain the starting physical address
                  * aligned of current page size
                  */
-                pa = ((((uint64_t)paddr) / page_size) * page_size);
+                pa = ((uint64_t)paddr) & ~(page_size - 1);
                 if (map_params->page_table_type == PTT_EPT) {
                         /* Keep original attribute(here &0x3f)
                          * bit 0(R) bit1(W) bit2(X) bit3~5 MT
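The break_page_table() change above swaps a divide-and-multiply round-down for a mask. For any power-of-two page_size the two forms are equivalent, and the mask is the usual alignment idiom. A quick standalone check of that equivalence (the addresses and sizes below are arbitrary examples):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Round addr down to a page_size boundary; page_size must be a power of two. */
static uint64_t align_down_mask(uint64_t addr, uint64_t page_size)
{
        return addr & ~(page_size - 1);
}

static uint64_t align_down_div(uint64_t addr, uint64_t page_size)
{
        return (addr / page_size) * page_size;
}

int main(void)
{
        /* Arbitrary example addresses against 4K/2M/1G page sizes. */
        uint64_t sizes[] = { 1ULL << 12, 1ULL << 21, 1ULL << 30 };
        uint64_t addrs[] = { 0x1234ULL, 0xabcdef123ULL, 0x40000000ULL };

        for (int i = 0; i < 3; i++)
                for (int j = 0; j < 3; j++)
                        assert(align_down_mask(addrs[j], sizes[i]) ==
                               align_down_div(addrs[j], sizes[i]));

        printf("mask and divide round-down agree\n");
        return 0;
}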
@@ -515,16 +515,6 @@
 /* LINCROFT specific MSRs */
 #define MSR_LNC_BIOS_CACHE_AS_RAM          0x000002E0  /* Configure CAR */
 
-/* MSR_IA32_VMX_EPT_VPID_CAP: EPT and VPID capability bits */
-#define MSR_VMX_EPT_X_ONLY                 (1UL << 0)  /* Execute Only */
-#define MSR_VMX_EPT_VPID_CAP_1GB           (1UL << 17) /* EPT 1GB page */
-#define MSR_VMX_INVEPT                     (1UL << 20) /* INVEPT */
-#define MSR_VMX_INVEPT_SINGLE_CONTEXT      (1UL << 25) /* INVEPT Single */
-#define MSR_VMX_INVEPT_GLOBAL_CONTEXT      (1UL << 26) /* INVEPT Global */
-#define MSR_VMX_INVVPID                    (1UL << 32) /* INVVPID */
-#define MSR_VMX_INVVPID_SINGLE_CONTEXT     (1UL << 41) /* INVVPID Single */
-#define MSR_VMX_INVVPID_GLOBAL_CONTEXT     (1UL << 42) /* INVVPID Global */
 /* EFER bits */
 #define MSR_IA32_EFER_SCE_BIT              (1<<0)
 #define MSR_IA32_EFER_LME_BIT              (1<<8)      /* IA32e mode enable */
@@ -325,6 +325,34 @@
 #define VMX_PROCBASED_CTLS2_EPT_VE             (1<<18)
 #define VMX_PROCBASED_CTLS2_XSVE_XRSTR         (1<<20)
 
+/* MSR_IA32_VMX_EPT_VPID_CAP: EPT and VPID capability bits */
+#define VMX_EPT_EXECUTE_ONLY                   (1 << 0)
+#define VMX_EPT_PAGE_WALK_4                    (1 << 6)
+#define VMX_EPT_PAGE_WALK_5                    (1 << 7)
+#define VMX_EPTP_UC                            (1 << 8)
+#define VMX_EPTP_WB                            (1 << 14)
+#define VMX_EPT_2MB_PAGE                       (1 << 16)
+#define VMX_EPT_1GB_PAGE                       (1 << 17)
+#define VMX_EPT_INVEPT                         (1 << 20)
+#define VMX_EPT_AD                             (1 << 21)
+#define VMX_EPT_INVEPT_SINGLE_CONTEXT          (1 << 25)
+#define VMX_EPT_INVEPT_GLOBAL_CONTEXT          (1 << 26)
+
+#define VMX_VPID_INVVPID                       (1 << 0)  /* (32 - 32) */
+#define VMX_VPID_INVVPID_INDIVIDUAL_ADDR       (1 << 8)  /* (40 - 32) */
+#define VMX_VPID_INVVPID_SINGLE_CONTEXT        (1 << 9)  /* (41 - 32) */
+#define VMX_VPID_INVVPID_GLOBAL_CONTEXT        (1 << 10) /* (42 - 32) */
+#define VMX_VPID_INVVPID_SINGLE_NON_GLOBAL     (1 << 11) /* (43 - 32) */
+
+#define VMX_EPT_MT_EPTE_SHIFT                  3
+#define VMX_EPTP_PWL_MASK                      0x38
+#define VMX_EPTP_PWL_4                         0x18
+#define VMX_EPTP_PWL_5                         0x20
+#define VMX_EPTP_AD_ENABLE_BIT                 (1 << 6)
+#define VMX_EPTP_MT_MASK                       0x7
+#define VMX_EPTP_MT_WB                         0x6
+#define VMX_EPTP_MT_UC                         0x0
+
 /* VMX exit control bits */
 #define VMX_EXIT_CTLS_SAVE_DBG                 (1<<2)
 #define VMX_EXIT_CTLS_HOST_ADDR64              (1<<9)
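The "(41 - 32)" style comments on the new VPID masks record that these macros index the high dword of the MSR, whereas the MSR_VMX_* macros removed from the MSR header used absolute bit positions in the 64-bit value. A compile-time sanity check of that correspondence; the old macros are repeated locally here, with 1ULL instead of 1UL so the shifts are well defined on 32-bit hosts:

#include <assert.h>
#include <stdint.h>

/* New masks (index into the high 32 bits of IA32_VMX_EPT_VPID_CAP). */
#define VMX_VPID_INVVPID                 (1U << 0)   /* (32 - 32) */
#define VMX_VPID_INVVPID_SINGLE_CONTEXT  (1U << 9)   /* (41 - 32) */
#define VMX_VPID_INVVPID_GLOBAL_CONTEXT  (1U << 10)  /* (42 - 32) */

/* Old absolute-bit masks that this commit removes. */
#define MSR_VMX_INVVPID                  (1ULL << 32)
#define MSR_VMX_INVVPID_SINGLE_CONTEXT   (1ULL << 41)
#define MSR_VMX_INVVPID_GLOBAL_CONTEXT   (1ULL << 42)

/* Each new mask must equal the old one shifted down by one dword. */
static_assert(VMX_VPID_INVVPID == (uint32_t)(MSR_VMX_INVVPID >> 32),
              "INVVPID mask mismatch");
static_assert(VMX_VPID_INVVPID_SINGLE_CONTEXT ==
              (uint32_t)(MSR_VMX_INVVPID_SINGLE_CONTEXT >> 32),
              "single-context mask mismatch");
static_assert(VMX_VPID_INVVPID_GLOBAL_CONTEXT ==
              (uint32_t)(MSR_VMX_INVVPID_GLOBAL_CONTEXT >> 32),
              "global-context mask mismatch");

int main(void) { return 0; }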