hv: emulate IA32_EFER and adjust Load EFER VMX controls
This helps to improve performance:

- No need to execute VMREAD in vcpu_get_efer(), which is called frequently.
- VMX_EXIT_CTLS_SAVE_EFER can be removed from the VM-Exit Controls.
- If the value of the IA32_EFER MSR is identical between the host and the
  guest (highly likely), adjust the VMX controls not to load IA32_EFER on
  VMExit and VMEntry.

It's convenient to continue using the existing vcpu_s/get_efer() APIs,
rather than the common vcpu_s/get_guest_msr().

Tracked-On: #6289
Signed-off-by: Sainath Grandhi <sainath.grandhi@intel.com>
Signed-off-by: Zide Chen <zide.chen@intel.com>
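
Why the host and guest IA32_EFER values are "highly likely" identical for a 64-bit guest: the host runs in IA-32e mode with NXE set, and with the enable_paging() change below it sets SCE as well, which is exactly the set of bits a 64-bit guest OS typically programs. A minimal standalone sketch, with bit positions taken from the Intel SDM (the EFER_* names here are illustrative, not ACRN's):

#include <stdint.h>
#include <stdio.h>

/* IA32_EFER bit masks per the Intel SDM (vol. 4, MSR 0xC0000080) */
#define EFER_SCE  (1ULL << 0)   /* SYSCALL enable */
#define EFER_LME  (1ULL << 8)   /* IA-32e mode enable */
#define EFER_LMA  (1ULL << 10)  /* IA-32e mode active */
#define EFER_NXE  (1ULL << 11)  /* execute-disable enable */

int main(void)
{
	/* Host EFER after enable_paging(): long mode plus NXE and SCE */
	uint64_t host_efer  = EFER_LMA | EFER_LME | EFER_NXE | EFER_SCE;

	/* A 64-bit guest OS typically programs the same set of bits */
	uint64_t guest_efer = EFER_LMA | EFER_LME | EFER_NXE | EFER_SCE;

	/* Identical values: VM entry/exit need not load IA32_EFER at all */
	printf("host=0x%llx guest=0x%llx -> %s Load EFER controls\n",
	       (unsigned long long)host_efer, (unsigned long long)guest_efer,
	       (host_efer == guest_efer) ? "clear" : "set");
	return 0;
}

Both sides compute 0xd01 here, so vcpu_set_efer() can leave the Load EFER controls clear on the common path.
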
@@ -92,10 +92,6 @@ uint64_t vcpu_get_efer(struct acrn_vcpu *vcpu)
 	struct run_context *ctx =
 		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
-	if (!bitmap_test(CPU_REG_EFER, &vcpu->reg_updated) &&
-		!bitmap_test_and_set_lock(CPU_REG_EFER, &vcpu->reg_cached)) {
-		ctx->ia32_efer = exec_vmread64(VMX_GUEST_IA32_EFER_FULL);
-	}
 	return ctx->ia32_efer;
 }
 
@@ -103,6 +99,16 @@ void vcpu_set_efer(struct acrn_vcpu *vcpu, uint64_t val)
 {
 	vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.ia32_efer
 		= val;
+
+	if (val == msr_read(MSR_IA32_EFER)) {
+		clear_vmcs_bit(VMX_ENTRY_CONTROLS, VMX_ENTRY_CTLS_LOAD_EFER);
+		clear_vmcs_bit(VMX_EXIT_CONTROLS, VMX_EXIT_CTLS_LOAD_EFER);
+	} else {
+		set_vmcs_bit(VMX_ENTRY_CONTROLS, VMX_ENTRY_CTLS_LOAD_EFER);
+		set_vmcs_bit(VMX_EXIT_CONTROLS, VMX_EXIT_CTLS_LOAD_EFER);
+	}
+
+	/* Write the new value to VMCS in either case */
 	bitmap_set_lock(CPU_REG_EFER, &vcpu->reg_updated);
 }
 
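
Note that vcpu_set_efer() only marks CPU_REG_EFER in reg_updated; the VMCS write itself is deferred. The flush presumably happens on the VM-entry path, along the lines of the sketch below. This is an assumption: bitmap_test_and_clear_lock() mirrors the bitmap_test_and_set_lock() call seen above, but the actual flush site is not part of this diff.

/*
 * Hypothetical flush on the VM-entry path: if a vCPU EFER write is
 * pending, propagate the cached value into the VMCS guest-state area.
 * (Sketch only; the real flush code is not shown in this commit.)
 */
static void flush_efer_if_dirty(struct acrn_vcpu *vcpu)
{
	if (bitmap_test_and_clear_lock(CPU_REG_EFER, &vcpu->reg_updated)) {
		exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, vcpu_get_efer(vcpu));
	}
}
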
@@ -481,12 +481,8 @@ static void init_entry_ctrl(const struct acrn_vcpu *vcpu)
 	/* Log messages to show initializing VMX entry controls */
 	pr_dbg("Initialize Entry control ");
 
-	/* Set up VMX entry controls - pg 2908 24.8.1 * Set IA32e guest mode -
-	 * on VM entry processor is in IA32e 64 bitmode * Start guest with host
-	 * IA32_PAT and IA32_EFER
-	 */
-	value32 = (VMX_ENTRY_CTLS_LOAD_EFER |
-			VMX_ENTRY_CTLS_LOAD_PAT);
+	/* Set up VMX entry controls - ISDM 24.8.1 */
+	value32 = VMX_ENTRY_CTLS_LOAD_PAT;
 
 	if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
 		value32 |= (VMX_ENTRY_CTLS_IA32E_MODE);
@@ -525,13 +521,11 @@ static void init_exit_ctrl(const struct acrn_vcpu *vcpu)
 	 * size is 64 bit Set up to acknowledge interrupt on exit, if 1 the HW
 	 * acks the interrupt in VMX non-root and saves the interrupt vector to
	 * the relevant VM exit field for further processing by Hypervisor
-	 * Enable saving and loading of IA32_PAT and IA32_EFER on VMEXIT Enable
-	 * saving of pre-emption timer on VMEXIT
+	 * Enable saving and loading IA32_PAT on VMEXIT
 	 */
 	value32 = check_vmx_ctrl(MSR_IA32_VMX_EXIT_CTLS,
 			VMX_EXIT_CTLS_ACK_IRQ | VMX_EXIT_CTLS_SAVE_PAT |
-			VMX_EXIT_CTLS_LOAD_PAT | VMX_EXIT_CTLS_LOAD_EFER |
-			VMX_EXIT_CTLS_SAVE_EFER | VMX_EXIT_CTLS_HOST_ADDR64);
+			VMX_EXIT_CTLS_LOAD_PAT | VMX_EXIT_CTLS_HOST_ADDR64);
 
 	exec_vmwrite32(VMX_EXIT_CONTROLS, value32);
 	pr_dbg("VMX_EXIT_CONTROL: 0x%x ", value32);
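
check_vmx_ctrl() reconciles the requested exit controls with what the CPU actually supports. A sketch of the underlying capability-MSR convention from the Intel SDM, appendix A; adjust_vmx_ctrl() is a hypothetical stand-in, and ACRN's check_vmx_ctrl() is only presumed to follow the same allowed-0/allowed-1 rule:

#include <stdint.h>

/*
 * VMX capability MSRs (e.g. MSR_IA32_VMX_EXIT_CTLS) report allowed-0
 * settings in bits 31:0 (these control bits must be 1) and allowed-1
 * settings in bits 63:32 (these control bits may be 1).
 */
static uint32_t adjust_vmx_ctrl(uint64_t cap, uint32_t requested)
{
	uint32_t allowed0 = (uint32_t)cap;
	uint32_t allowed1 = (uint32_t)(cap >> 32);

	/* keep only settable bits, then force the required ones on */
	return (requested & allowed1) | allowed0;
}
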
@@ -37,6 +37,7 @@ static const uint32_t emulated_guest_msrs[NUM_GUEST_MSRS] = {
 	 * Number of entries: NUM_WORLD_MSRS
 	 */
 	MSR_IA32_PAT,
+	MSR_IA32_EFER,
 	MSR_IA32_TSC_ADJUST,
 
 	/*
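
For MSR_IA32_EFER emulation to work, guest RDMSR/WRMSR accesses to it must trap, which the VMX MSR bitmap controls. A generic sketch of flagging an MSR for read/write interception per the 4-KByte bitmap layout in the Intel SDM; intercept_msr_rw() is illustrative, not ACRN's actual API:

#include <stdint.h>

/*
 * VMX MSR bitmap layout (Intel SDM): four 1-KByte regions covering
 * reads of MSRs 0x0-0x1FFF, reads of 0xC0000000-0xC0001FFF, then the
 * same two ranges again for writes.
 */
static void intercept_msr_rw(uint8_t *bitmap, uint32_t msr)
{
	uint32_t idx  = msr & 0x1FFFU;
	/* the high MSR range occupies the second 1-KByte region */
	uint32_t base = ((msr & 0xC0000000U) == 0xC0000000U) ? 1024U : 0U;

	bitmap[base + (idx >> 3U)]         |= (uint8_t)(1U << (idx & 7U)); /* read  */
	bitmap[2048U + base + (idx >> 3U)] |= (uint8_t)(1U << (idx & 7U)); /* write */
}
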
@@ -523,6 +524,11 @@ int32_t rdmsr_vmexit_handler(struct acrn_vcpu *vcpu)
 		v = vcpu_get_guest_msr(vcpu, MSR_IA32_PAT);
 		break;
 	}
+	case MSR_IA32_EFER:
+	{
+		v = vcpu_get_efer(vcpu);
+		break;
+	}
 	case MSR_IA32_APIC_BASE:
 	{
 		/* Read APIC base */
@@ -873,6 +879,11 @@ int32_t wrmsr_vmexit_handler(struct acrn_vcpu *vcpu)
 		err = write_pat_msr(vcpu, v);
 		break;
 	}
+	case MSR_IA32_EFER:
+	{
+		vcpu_set_efer(vcpu, v);
+		break;
+	}
 	case MSR_IA32_APIC_BASE:
 	{
 		err = vlapic_set_apicbase(vcpu_vlapic(vcpu), v);
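
End to end, a guest write to IA32_EFER now takes the trap path: the WRMSR exits to wrmsr_vmexit_handler(), which dispatches to vcpu_set_efer(), so the value lands in run_ctx (and reaches the VMCS via the deferred write) instead of being saved on every VM exit. A guest-side sketch of the triggering instruction, using GCC inline asm; the wrmsr() helper here is generic, not ACRN code:

#include <stdint.h>

#define MSR_IA32_EFER	0xC0000080U

/* Guest-side WRMSR: under this patch it traps to wrmsr_vmexit_handler() */
static inline void wrmsr(uint32_t msr, uint64_t val)
{
	__asm__ volatile ("wrmsr"
			  : /* no outputs */
			  : "c" (msr), "a" ((uint32_t)val),
			    "d" ((uint32_t)(val >> 32))
			  : "memory");
}
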
@@ -160,7 +160,13 @@ void enable_paging(void)
 	 * instruction fetching from pages with XD bit set.
 	 */
 	tmp64 = msr_read(MSR_IA32_EFER);
-	tmp64 |= MSR_IA32_EFER_NXE_BIT;
+
+	/*
+	 * SCE bit is not used by the host. However we set this bit so that
+	 * it's highly likely that the value of IA32_EFER the host and the guest
+	 * is identical, and we don't need to switch this MSR on VMX transitions
+	 */
+	tmp64 |= MSR_IA32_EFER_NXE_BIT | MSR_IA32_EFER_SCE_BIT;
 	msr_write(MSR_IA32_EFER, tmp64);
 
 	/* Enable Write Protect, inhibiting writing to read-only pages */
@@ -172,7 +172,7 @@ enum reset_mode;
 #define SECURE_WORLD 1
 
 #define NUM_WORLD_MSRS 2U
-#define NUM_COMMON_MSRS 22U
+#define NUM_COMMON_MSRS 23U
 #ifdef CONFIG_NVMX_ENABLED
 #define NUM_GUEST_MSRS (NUM_WORLD_MSRS + NUM_COMMON_MSRS + NUM_VMX_MSRS)
 #else
@@ -40,6 +40,25 @@ static inline uint64_t apic_access_offset(uint64_t qual)
 {
 	return (qual & APIC_ACCESS_OFFSET);
 }
+
+static inline void clear_vmcs_bit(uint32_t vmcs_field, uint32_t bit)
+{
+	uint64_t val64;
+
+	val64 = exec_vmread(vmcs_field);
+	val64 &= ~bit;
+	exec_vmwrite(vmcs_field, val64);
+}
+
+static inline void set_vmcs_bit(uint32_t vmcs_field, uint32_t bit)
+{
+	uint64_t val64;
+
+	val64 = exec_vmread(vmcs_field);
+	val64 |= bit;
+	exec_vmwrite(vmcs_field, val64);
+}
+
 void init_vmcs(struct acrn_vcpu *vcpu);
 void load_vmcs(const struct acrn_vcpu *vcpu);
 void init_host_state(void);