hv: nested: support for VMREAD and VMWRITE emulation

This patch implements the VMREAD and VMWRITE instructions.

When L1 guest is running with an active VMCS12, the “VMCS shadowing”
VM-execution control is always set to 1 in VMCS01. Thus the possible
behavior of VMREAD or VMWRITE from L1 could be:

- It causes a VM exit to L0 if the bit corresponding to the target VMCS
  field in the VMREAD bitmap or VMWRITE bitmap is set to 1.
- It accesses the VMCS referenced by VMCS01 link pointer (VMCS02 in
  our case) if the above mentioned bit is set to 0.

This patch handles the VMREAD and VMWRITE VM exits in this way:

- on VMWRITE, it writes the desired VMCS value to the respective field
  in the cached VMCS12. For VMCS fields that need to be synced to VMCS02,
  it sets the corresponding dirty flag.

- on VMREAD, it reads the desired VMCS value from the cached VMCS12.

Tracked-On: #5923
Signed-off-by: Alex Merritt <alex.merritt@intel.com>
Signed-off-by: Sainath Grandhi <sainath.grandhi@intel.com>
Signed-off-by: Zide Chen <zide.chen@intel.com>
Acked-by: Eddie Dong <eddie.dong@Intel.com>
This commit is contained in:
Zide Chen 2021-05-10 14:34:42 -07:00 committed by wenlingz
parent 2c17b88c95
commit 1ca0c7549e
3 changed files with 116 additions and 4 deletions

View File

@ -729,6 +729,8 @@ int32_t vmxon_vmexit_handler(struct acrn_vcpu *vcpu)
nested_vmx_result(VMfailInvalid, 0);
} else {
vcpu->arch.nested.vmxon = true;
vcpu->arch.nested.host_state_dirty = false;
vcpu->arch.nested.gpa_field_dirty = false;
vcpu->arch.nested.vmxon_ptr = vmptr_gpa;
vcpu->arch.nested.current_vmcs12_ptr = INVALID_GPA;
@ -772,6 +774,8 @@ int32_t vmxoff_vmexit_handler(struct acrn_vcpu *vcpu)
{
if (check_vmx_permission(vcpu)) {
vcpu->arch.nested.vmxon = false;
vcpu->arch.nested.host_state_dirty = false;
vcpu->arch.nested.gpa_field_dirty = false;
vcpu->arch.nested.current_vmcs12_ptr = INVALID_GPA;
(void)memset(vcpu->arch.nested.vmcs02, 0U, PAGE_SIZE);
(void)memset(&vcpu->arch.nested.vmcs12, 0U, sizeof(struct acrn_vmcs12));
@ -782,6 +786,102 @@ int32_t vmxoff_vmexit_handler(struct acrn_vcpu *vcpu)
return 0;
}
/*
 * Read-only VMCS fields exist only among the 64-bit, 32-bit and
 * natural-width encodings; a field-type value of 1 in bits [11:10] of
 * the encoding marks a read-only field. See ISDM Appendix B.
 */
static inline bool is_ro_vmcs_field(uint32_t field)
{
	bool ro = (VMX_VMCS_FIELD_TYPE(field) == 1U);

	/* 16-bit fields are never read-only, whatever their type bits say */
	if (VMX_VMCS_FIELD_WIDTH(field) == VMX_VMCS_FIELD_WIDTH_16) {
		ro = false;
	}

	return ro;
}
/*
 * @brief emulate VMREAD instruction from L1
 *
 * Reads the requested field from the cached VMCS12 and delivers the value
 * to the register or memory operand decoded from the instruction-information
 * field.
 *
 * @pre vcpu != NULL
 */
int32_t vmread_vmexit_handler(struct acrn_vcpu *vcpu)
{
	const uint32_t instr_info = exec_vmread(VMX_INSTR_INFO);

	if (check_vmx_permission(vcpu)) {
		if (vcpu->arch.nested.current_vmcs12_ptr == INVALID_GPA) {
			/* No current VMCS12: the instruction fails */
			nested_vmx_result(VMfailInvalid, 0);
		} else {
			/* TODO: VMfailValid for invalid VMCS fields */
			uint32_t field = (uint32_t)vcpu_get_gpreg(vcpu, VMX_II_REG2(instr_info));
			uint64_t value = vmcs12_read_field((uint64_t)&vcpu->arch.nested.vmcs12, field);

			/* Currently ACRN doesn't support 32bits L1 hypervisor, assuming operands are 64 bits */
			if (VMX_II_IS_REG(instr_info)) {
				vcpu_set_gpreg(vcpu, VMX_II_REG1(instr_info), value);
			} else {
				uint64_t operand_gpa = get_vmx_memory_operand(vcpu, instr_info);
				(void)copy_to_gpa(vcpu->vm, &value, operand_gpa, 8U);
			}

			pr_dbg("vmcs_field: %x vmcs_value: %llx", field, value);
			nested_vmx_result(VMsucceed, 0);
		}
	}

	return 0;
}
/*
* @brief emulate VMWRITE instruction from L1
* @pre vcpu != NULL
*/
int32_t vmwrite_vmexit_handler(struct acrn_vcpu *vcpu)
{
const uint32_t info = exec_vmread(VMX_INSTR_INFO);
uint64_t vmcs_value, gpa;
uint32_t vmcs_field;
if (check_vmx_permission(vcpu)) {
if (vcpu->arch.nested.current_vmcs12_ptr == INVALID_GPA) {
/* No current VMCS12 has been loaded via VMPTRLD: fail the instruction */
nested_vmx_result(VMfailInvalid, 0);
} else {
/* TODO: VMfailValid for invalid VMCS fields */
/* The field encoding comes from the register indicated by the instruction-information field */
vmcs_field = (uint32_t)vcpu_get_gpreg(vcpu, VMX_II_REG2(info));
/*
 * Writing a read-only field fails unless the guest's IA32_VMX_MISC
 * bit 29 reports that VMWRITE to read-only fields is supported.
 */
if (is_ro_vmcs_field(vmcs_field) &&
((vcpu_get_guest_msr(vcpu, MSR_IA32_VMX_MISC) & (1UL << 29U)) == 0UL)) {
nested_vmx_result(VMfailValid, VMXERR_VMWRITE_RO_COMPONENT);
} else {
/* Currently not support 32bits L1 hypervisor, assuming operands are 64 bits */
if (VMX_II_IS_REG(info)) {
vmcs_value = vcpu_get_gpreg(vcpu, VMX_II_REG1(info));
} else {
gpa = get_vmx_memory_operand(vcpu, info);
(void)copy_from_gpa(vcpu->vm, &vmcs_value, gpa, 8U);
}
/* Flag host-state writes so VMCS12 host-state fields get merged to VMCS01 later */
if (VMX_VMCS_FIELD_TYPE(vmcs_field) == VMX_VMCS_FIELD_TYPE_HOST) {
vcpu->arch.nested.host_state_dirty = true;
}
/*
* For simplicity, gpa_field_dirty could be used for all VMCS fields that
* are programmed by L1 hypervisor with a GPA address
*/
if (vmcs_field == VMX_MSR_BITMAP_FULL) {
vcpu->arch.nested.gpa_field_dirty = true;
}
pr_dbg("vmcs_field: %x vmcs_value: %llx", vmcs_field, vmcs_value);
/* The write lands in the cached VMCS12; dirty flags drive later sync to VMCS02/VMCS01 */
vmcs12_write_field((uint64_t)&vcpu->arch.nested.vmcs12, vmcs_field, vmcs_value);
nested_vmx_result(VMsucceed, 0);
}
}
}
return 0;
}
/**
* @brief Sync shadow fields from vmcs02 to cache VMCS12
*

View File

@ -82,17 +82,17 @@ static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
.handler = undefined_vmexit_handler},
[VMX_EXIT_REASON_VMPTRST] = {
.handler = undefined_vmexit_handler},
[VMX_EXIT_REASON_VMREAD] = {
.handler = undefined_vmexit_handler},
[VMX_EXIT_REASON_VMRESUME] = {
.handler = undefined_vmexit_handler},
[VMX_EXIT_REASON_VMWRITE] = {
.handler = undefined_vmexit_handler},
#ifndef CONFIG_NVMX_ENABLED
[VMX_EXIT_REASON_VMCLEAR] = {
.handler = undefined_vmexit_handler},
[VMX_EXIT_REASON_VMPTRLD] = {
.handler = undefined_vmexit_handler},
[VMX_EXIT_REASON_VMREAD] = {
.handler = undefined_vmexit_handler},
[VMX_EXIT_REASON_VMWRITE] = {
.handler = undefined_vmexit_handler},
[VMX_EXIT_REASON_VMXOFF] = {
.handler = undefined_vmexit_handler},
[VMX_EXIT_REASON_VMXON] = {
@ -104,6 +104,12 @@ static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
[VMX_EXIT_REASON_VMPTRLD] = {
.handler = vmptrld_vmexit_handler,
.need_exit_qualification = 1},
[VMX_EXIT_REASON_VMREAD] = {
.handler = vmread_vmexit_handler,
.need_exit_qualification = 1},
[VMX_EXIT_REASON_VMWRITE] = {
.handler = vmwrite_vmexit_handler,
.need_exit_qualification = 1},
[VMX_EXIT_REASON_VMXOFF] = {
.handler = vmxoff_vmexit_handler},
[VMX_EXIT_REASON_VMXON] = {

View File

@ -87,6 +87,8 @@ union value_64 {
#define VMXERR_VMPTRLD_INVALID_ADDRESS (9)
#define VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID (10)
#define VMXERR_VMPTRLD_VMXON_POINTER (11)
#define VMXERR_UNSUPPORTED_COMPONENT (12)
#define VMXERR_VMWRITE_RO_COMPONENT (13)
#define VMXERR_VMXON_IN_VMX_ROOT_OPERATION (15)
/*
@ -313,6 +315,8 @@ int32_t vmxon_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t vmxoff_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t vmptrld_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t vmclear_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t vmread_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t vmwrite_vmexit_handler(struct acrn_vcpu *vcpu);
#ifdef CONFIG_NVMX_ENABLED
struct acrn_nested {
@ -323,6 +327,8 @@ struct acrn_nested {
uint64_t current_vmcs12_ptr; /* GPA */
uint64_t vmxon_ptr; /* GPA */
bool vmxon; /* To indicate if vCPU entered VMX operation */
bool host_state_dirty; /* To indicate need to merge VMCS12 host-state fields to VMCS01 */
bool gpa_field_dirty;
} __aligned(PAGE_SIZE);
void init_nested_vmx(__unused struct acrn_vm *vm);