mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-06-25 15:02:13 +00:00
hv: nested: enable multiple active VMCS12 support
This patch changes the size of the vvmcs[] array from 1 to PER_VCPU_ACTIVE_VVMCS_NUM, and actually enables multiple active VMCS12 support in ACRN. The basic operations: - if L1 VMPTRLDs a VMCS12 without previously VMCLEARing the current VMCS12, ACRN no longer unconditionally flushes the current VMCS12 back to L1. Instead, it tries to keep both the current and the newly loaded VMCS12 in the nested->vvmcs[] array, unless: - if there is no available vvmcs[] entry, ACRN flushes one active VMCS12 to make room for the new VMCS12. Tracked-On: #6289 Signed-off-by: Zide Chen <zide.chen@intel.com> Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
parent
ce8d69333a
commit
45b036e028
@ -18,6 +18,8 @@
|
||||
/* Cache the content of MSR_IA32_VMX_BASIC */
|
||||
static uint32_t vmx_basic;
|
||||
|
||||
static void clear_vvmcs(struct acrn_vcpu *vcpu, struct acrn_vvmcs *vvmcs);
|
||||
|
||||
/* The only purpose of this array is to serve the is_vmx_msr() function */
|
||||
static const uint32_t vmx_msrs[NUM_VMX_MSRS] = {
|
||||
LIST_OF_VMX_MSRS
|
||||
@ -701,6 +703,28 @@ static bool validate_nvmx_cr4(struct acrn_vcpu *vcpu)
|
||||
msr_read(MSR_IA32_VMX_CR4_FIXED1));
|
||||
}
|
||||
|
||||
/*
|
||||
* @pre vcpu != NULL
|
||||
*/
|
||||
static void reset_vvmcs(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
struct acrn_vvmcs *vvmcs;
|
||||
uint32_t idx;
|
||||
|
||||
vcpu->arch.nested.current_vvmcs = NULL;
|
||||
|
||||
for (idx = 0U; idx < MAX_ACTIVE_VVMCS_NUM; idx++) {
|
||||
vvmcs = &vcpu->arch.nested.vvmcs[idx];
|
||||
vvmcs->host_state_dirty = false;
|
||||
vvmcs->control_fields_dirty = false;
|
||||
vvmcs->vmcs12_gpa = INVALID_GPA;
|
||||
vvmcs->ref_cnt = 0;
|
||||
|
||||
(void)memset(vvmcs->vmcs02, 0U, PAGE_SIZE);
|
||||
(void)memset(&vvmcs->vmcs12, 0U, sizeof(struct acrn_vmcs12));
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* @pre vcpu != NULL
|
||||
*/
|
||||
@ -734,12 +758,10 @@ int32_t vmxon_vmexit_handler(struct acrn_vcpu *vcpu)
|
||||
nested_vmx_result(VMfailInvalid, 0);
|
||||
} else {
|
||||
vcpu->arch.nested.vmxon = true;
|
||||
vcpu->arch.nested.vvmcs[0].host_state_dirty = false;
|
||||
vcpu->arch.nested.vvmcs[0].control_fields_dirty = false;
|
||||
vcpu->arch.nested.in_l2_guest = false;
|
||||
vcpu->arch.nested.vmxon_ptr = vmptr_gpa;
|
||||
vcpu->arch.nested.current_vvmcs = NULL;
|
||||
|
||||
reset_vvmcs(vcpu);
|
||||
nested_vmx_result(VMsucceed, 0);
|
||||
}
|
||||
}
|
||||
@ -780,14 +802,9 @@ int32_t vmxoff_vmexit_handler(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
if (check_vmx_permission(vcpu)) {
|
||||
vcpu->arch.nested.vmxon = false;
|
||||
vcpu->arch.nested.vvmcs[0].host_state_dirty = false;
|
||||
vcpu->arch.nested.vvmcs[0].control_fields_dirty = false;
|
||||
vcpu->arch.nested.in_l2_guest = false;
|
||||
vcpu->arch.nested.current_vvmcs = NULL;
|
||||
|
||||
(void)memset(vcpu->arch.nested.vvmcs[0].vmcs02, 0U, PAGE_SIZE);
|
||||
(void)memset(&vcpu->arch.nested.vvmcs[0].vmcs12, 0U, sizeof(struct acrn_vmcs12));
|
||||
|
||||
reset_vvmcs(vcpu);
|
||||
nested_vmx_result(VMsucceed, 0);
|
||||
}
|
||||
|
||||
@ -805,6 +822,63 @@ static inline bool is_ro_vmcs_field(uint32_t field)
|
||||
return (VMX_VMCS_FIELD_WIDTH_16 != w) && (VMX_VMCS_FIELD_TYPE(field) == 1U);
|
||||
}
|
||||
|
||||
/*
|
||||
* @pre vcpu != NULL
|
||||
*/
|
||||
static struct acrn_vvmcs *lookup_vvmcs(struct acrn_vcpu *vcpu, uint64_t vmcs12_gpa)
|
||||
{
|
||||
struct acrn_vvmcs *vvmcs = NULL;
|
||||
uint32_t idx;
|
||||
|
||||
for (idx = 0U; idx < MAX_ACTIVE_VVMCS_NUM; idx++) {
|
||||
if (vcpu->arch.nested.vvmcs[idx].vmcs12_gpa == vmcs12_gpa) {
|
||||
vvmcs = &vcpu->arch.nested.vvmcs[idx];
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return vvmcs;
|
||||
}
|
||||
|
||||
/*
|
||||
* @pre vcpu != NULL
|
||||
*/
|
||||
static struct acrn_vvmcs *get_or_replace_vvmcs_entry(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
struct acrn_nested *nested = &vcpu->arch.nested;
|
||||
struct acrn_vvmcs *vvmcs = NULL;
|
||||
uint32_t idx, min_cnt = ~0U;
|
||||
|
||||
/* look for an inactive entry first */
|
||||
for (idx = 0U; idx < MAX_ACTIVE_VVMCS_NUM; idx++) {
|
||||
if (nested->vvmcs[idx].vmcs12_gpa == INVALID_GPA) {
|
||||
/* found an inactive vvmcs[] entry. */
|
||||
vvmcs = &nested->vvmcs[idx];
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* In case we have to release an active entry to make room for the new VMCS12 */
|
||||
if (vvmcs == NULL) {
|
||||
for (idx = 0U; idx < MAX_ACTIVE_VVMCS_NUM; idx++) {
|
||||
/* look for the entry with least reference count */
|
||||
if (nested->vvmcs[idx].ref_cnt < min_cnt) {
|
||||
min_cnt = nested->vvmcs[idx].ref_cnt;
|
||||
vvmcs = &nested->vvmcs[idx];
|
||||
}
|
||||
}
|
||||
|
||||
clear_vvmcs(vcpu, vvmcs);
|
||||
}
|
||||
|
||||
/* reset ref_cnt for all entries */
|
||||
for (idx = 0U; idx < MAX_ACTIVE_VVMCS_NUM; idx++) {
|
||||
nested->vvmcs[idx].ref_cnt = 0U;
|
||||
}
|
||||
|
||||
return vvmcs;
|
||||
}
|
||||
|
||||
/*
|
||||
* @brief emulate VMREAD instruction from L1
|
||||
* @pre vcpu != NULL
|
||||
@ -1032,7 +1106,7 @@ static void disable_vmcs_shadowing(void)
|
||||
* @pre vcpu != NULL
|
||||
* @pre vmcs01 is current
|
||||
*/
|
||||
static void clear_vmcs02(struct acrn_vcpu *vcpu, struct acrn_vvmcs *vvmcs)
|
||||
static void clear_vvmcs(struct acrn_vcpu *vcpu, struct acrn_vvmcs *vvmcs)
|
||||
{
|
||||
/*
|
||||
* Now VMCS02 is active and being used as a shadow VMCS.
|
||||
@ -1075,7 +1149,7 @@ static void clear_vmcs02(struct acrn_vcpu *vcpu, struct acrn_vvmcs *vvmcs)
|
||||
int32_t vmptrld_vmexit_handler(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
struct acrn_nested *nested = &vcpu->arch.nested;
|
||||
struct acrn_vvmcs *vvmcs = &nested->vvmcs[0];
|
||||
struct acrn_vvmcs *vvmcs;
|
||||
uint64_t vmcs12_gpa;
|
||||
|
||||
if (check_vmx_permission(vcpu)) {
|
||||
@ -1091,22 +1165,37 @@ int32_t vmptrld_vmexit_handler(struct acrn_vcpu *vcpu)
|
||||
/* VMPTRLD current VMCS12, do nothing */
|
||||
nested_vmx_result(VMsucceed, 0);
|
||||
} else {
|
||||
if (vvmcs->vmcs12_gpa != INVALID_GPA) {
|
||||
vvmcs = lookup_vvmcs(vcpu, vmcs12_gpa);
|
||||
if (vvmcs == NULL) {
|
||||
vvmcs = get_or_replace_vvmcs_entry(vcpu);
|
||||
|
||||
/* Create the VMCS02 based on this new VMCS12 */
|
||||
|
||||
/*
|
||||
* L1 hypervisor VMPTRLD a new VMCS12, or VMPTRLD a VMCLEARed VMCS12.
|
||||
* The current VMCS12 remains active but ACRN needs to sync the content of it
|
||||
* to guest memory so that the new VMCS12 can be loaded to the cache VMCS12.
|
||||
* initialize VMCS02
|
||||
* VMCS revision ID must equal to what reported by IA32_VMX_BASIC MSR
|
||||
*/
|
||||
clear_vmcs02(vcpu, vvmcs);
|
||||
(void)memcpy_s(vvmcs->vmcs02, 4U, (void *)&vmx_basic, 4U);
|
||||
|
||||
/* VMPTRLD VMCS02 so that we can VMWRITE to it */
|
||||
load_va_vmcs(vvmcs->vmcs02);
|
||||
init_host_state();
|
||||
|
||||
/* Load VMCS12 from L1 guest memory */
|
||||
(void)copy_from_gpa(vcpu->vm, (void *)&vvmcs->vmcs12, vmcs12_gpa,
|
||||
sizeof(struct acrn_vmcs12));
|
||||
|
||||
/* if needed, create nept_desc and allocate shadow root for the EPTP */
|
||||
get_nept_desc(vvmcs->vmcs12.ept_pointer);
|
||||
|
||||
/* Need to load shadow fields from this new VMCS12 to VMCS02 */
|
||||
sync_vmcs12_to_vmcs02(vcpu, &vvmcs->vmcs12);
|
||||
} else {
|
||||
vvmcs->ref_cnt += 1U;
|
||||
}
|
||||
|
||||
/* Create the VMCS02 based on this new VMCS12 */
|
||||
|
||||
/*
|
||||
* initialize VMCS02
|
||||
* VMCS revision ID must equal to what reported by IA32_VMX_BASIC MSR
|
||||
*/
|
||||
(void)memcpy_s(vvmcs->vmcs02, 4U, (void *)&vmx_basic, 4U);
|
||||
/* Before VMCS02 is being used as a shadow VMCS, VMCLEAR it */
|
||||
clear_va_vmcs(vvmcs->vmcs02);
|
||||
|
||||
/*
|
||||
* Now VMCS02 is not active, set the shadow-VMCS indicator.
|
||||
@ -1114,23 +1203,6 @@ int32_t vmptrld_vmexit_handler(struct acrn_vcpu *vcpu)
|
||||
*/
|
||||
set_vmcs02_shadow_indicator(vvmcs);
|
||||
|
||||
/* VMPTRLD VMCS02 so that we can VMWRITE to it */
|
||||
load_va_vmcs(vvmcs->vmcs02);
|
||||
init_host_state();
|
||||
|
||||
/* Load VMCS12 from L1 guest memory */
|
||||
(void)copy_from_gpa(vcpu->vm, (void *)&vvmcs->vmcs12, vmcs12_gpa,
|
||||
sizeof(struct acrn_vmcs12));
|
||||
|
||||
/* if needed, create nept_desc and allocate shadow root for the EPTP */
|
||||
get_nept_desc(vvmcs->vmcs12.ept_pointer);
|
||||
|
||||
/* Need to load shadow fields from this new VMCS12 to VMCS02 */
|
||||
sync_vmcs12_to_vmcs02(vcpu, &vvmcs->vmcs12);
|
||||
|
||||
/* Before VMCS02 is being used as a shadow VMCS, VMCLEAR it */
|
||||
clear_va_vmcs(vvmcs->vmcs02);
|
||||
|
||||
/* Switch back to vmcs01 */
|
||||
load_va_vmcs(vcpu->arch.vmcs);
|
||||
|
||||
@ -1152,7 +1224,7 @@ int32_t vmptrld_vmexit_handler(struct acrn_vcpu *vcpu)
|
||||
int32_t vmclear_vmexit_handler(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
struct acrn_nested *nested = &vcpu->arch.nested;
|
||||
struct acrn_vvmcs *vvmcs = &nested->vvmcs[0];
|
||||
struct acrn_vvmcs *vvmcs;
|
||||
uint64_t vmcs12_gpa;
|
||||
|
||||
if (check_vmx_permission(vcpu)) {
|
||||
@ -1163,21 +1235,36 @@ int32_t vmclear_vmexit_handler(struct acrn_vcpu *vcpu)
|
||||
} else if (vmcs12_gpa == nested->vmxon_ptr) {
|
||||
nested_vmx_result(VMfailValid, VMXERR_VMCLEAR_VMXON_POINTER);
|
||||
} else {
|
||||
if (vvmcs->vmcs12_gpa == vmcs12_gpa) {
|
||||
/*
|
||||
* The target VMCS12 is active and current.
|
||||
* VMCS02 is active and being used as a shadow VMCS.
|
||||
*/
|
||||
vvmcs = lookup_vvmcs(vcpu, vmcs12_gpa);
|
||||
if (vvmcs != NULL) {
|
||||
uint64_t current_vmcs12_gpa = INVALID_GPA;
|
||||
|
||||
/* Save for comparison */
|
||||
if (nested->current_vvmcs) {
|
||||
current_vmcs12_gpa = nested->current_vvmcs->vmcs12_gpa;
|
||||
}
|
||||
|
||||
/* VMCLEAR an active VMCS12, may or may not be current */
|
||||
vvmcs->vmcs12.launch_state = VMCS12_LAUNCH_STATE_CLEAR;
|
||||
|
||||
clear_vmcs02(vcpu, vvmcs);
|
||||
clear_vvmcs(vcpu, vvmcs);
|
||||
|
||||
/* Switch back to vmcs01 (no VMCS shadowing) */
|
||||
load_va_vmcs(vcpu->arch.vmcs);
|
||||
|
||||
/* no current VMCS12 */
|
||||
nested->current_vvmcs = NULL;
|
||||
if (current_vmcs12_gpa != INVALID_GPA) {
|
||||
if (current_vmcs12_gpa == vmcs12_gpa) {
|
||||
/* VMCLEAR current VMCS12 */
|
||||
nested->current_vvmcs = NULL;
|
||||
} else {
|
||||
/*
|
||||
* VMCLEAR an active but not current VMCS12.
|
||||
* VMCS shadowing was cleared earlier in clear_vvmcs()
|
||||
*/
|
||||
enable_vmcs_shadowing(nested->current_vvmcs);
|
||||
}
|
||||
} else {
|
||||
/* do nothing if there is no current VMCS12 */
|
||||
}
|
||||
} else {
|
||||
/*
|
||||
* we need to update the VMCS12 launch state in L1 memory in these two cases:
|
||||
|
@ -15,7 +15,7 @@
|
||||
#include <asm/guest/nested.h>
|
||||
|
||||
#define VETP_LOG_LEVEL LOG_DEBUG
|
||||
#define CONFIG_MAX_GUEST_EPT_NUM 4
|
||||
#define CONFIG_MAX_GUEST_EPT_NUM (MAX_ACTIVE_VVMCS_NUM * MAX_VCPUS_PER_VM)
|
||||
static struct nept_desc nept_desc_bucket[CONFIG_MAX_GUEST_EPT_NUM];
|
||||
static spinlock_t nept_desc_bucket_lock;
|
||||
|
||||
|
@ -6,6 +6,7 @@
|
||||
#ifndef NESTED_H
|
||||
#define NESTED_H
|
||||
|
||||
#include <asm/vm_config.h>
|
||||
#include <lib/errno.h>
|
||||
|
||||
/* helper data structure to make VMX capability MSR manipulation easier */
|
||||
@ -333,12 +334,15 @@ struct acrn_vvmcs {
|
||||
uint8_t vmcs02[PAGE_SIZE]; /* VMCS to run L2 and as Link Pointer in VMCS01 */
|
||||
struct acrn_vmcs12 vmcs12; /* To cache L1's VMCS12*/
|
||||
uint64_t vmcs12_gpa; /* The corresponding L1 GPA for this VMCS12 */
|
||||
uint32_t ref_cnt; /* Count of being VMPTRLDed without VMCLEARed */
|
||||
bool host_state_dirty; /* To indicate need to merge VMCS12 host-state fields to VMCS01 */
|
||||
bool control_fields_dirty; /* For all other non-host-state fields that need to be merged */
|
||||
} __aligned(PAGE_SIZE);
|
||||
|
||||
#define MAX_ACTIVE_VVMCS_NUM 4
|
||||
|
||||
struct acrn_nested {
|
||||
struct acrn_vvmcs vvmcs[1];
|
||||
struct acrn_vvmcs vvmcs[MAX_ACTIVE_VVMCS_NUM];
|
||||
struct acrn_vvmcs *current_vvmcs; /* Refer to the current loaded VMCS12 */
|
||||
uint64_t vmxon_ptr; /* GPA */
|
||||
bool vmxon; /* To indicate if vCPU entered VMX operation */
|
||||
|
@ -215,7 +215,7 @@ struct iwkey {
|
||||
};
|
||||
|
||||
struct acrn_vcpu_arch {
|
||||
/* vmcs region for this vcpu, MUST be 4KB-aligned */
|
||||
/* vmcs region for this vcpu, MUST be 4KB-aligned. This is VMCS01 when nested VMX is enabled */
|
||||
uint8_t vmcs[PAGE_SIZE];
|
||||
|
||||
/* context for nested virtualization, 4KB-aligned */
|
||||
|
Loading…
Reference in New Issue
Block a user