hv: switch IA32_TSC_AUX between host/guest through VM Controls
Currently the guest IA32_TSC_AUX MSR is loaded manually right before VM entry and saved right after VM exit. This patch enables the VM-Entry and VM-Exit Controls so that the hardware switches MSR IA32_TSC_AUX between host and guest automatically, which keeps vcpu_thread() and struct acrn_vcpu cleaner. It also removes the dead code that intercepted IA32_TSC_AUX.

Tracked-On: #1867
Signed-off-by: Zide Chen <zide.chen@intel.com>
Reviewed-by: Li, Fei1 <fei1.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
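
For reference, below is a minimal compilable sketch of the 16-byte entry format the VMX MSR-load/store areas use (Intel SDM Vol. 3, VM-entry/VM-exit controls for MSRs) and of the values that init_msr_area() in this patch programs into the guest and host IA32_TSC_AUX entries. The struct name msr_store_entry, the standalone enum, and the main() driver are illustrative assumptions for this sketch, not ACRN's actual definitions.

#include <stdint.h>
#include <stdio.h>

#define MSR_IA32_TSC_AUX    0xC0000103U

/*
 * One 16-byte entry of a VMX MSR-load/store area: bits 31:0 hold the
 * MSR index, bits 63:32 are reserved, bits 127:64 hold the MSR value.
 */
struct msr_store_entry {
    uint32_t msr_num;
    uint32_t reserved;
    uint64_t value;
} __attribute__((aligned(16)));

enum {
    MSR_AREA_TSC_AUX = 0,   /* the only entry this patch defines */
    MSR_AREA_COUNT
};

int main(void)
{
    /* Stand-ins for the per-vcpu guest/host areas referenced in the diff. */
    struct msr_store_entry guest[MSR_AREA_COUNT] = {{0U, 0U, 0UL}};
    struct msr_store_entry host[MSR_AREA_COUNT] = {{0U, 0U, 0UL}};
    uint64_t vcpu_id = 1UL, pcpu_id = 3UL;    /* example IDs */

    /* Mirrors what init_msr_area() programs in the patch. */
    guest[MSR_AREA_TSC_AUX].msr_num = MSR_IA32_TSC_AUX;
    guest[MSR_AREA_TSC_AUX].value = vcpu_id;
    host[MSR_AREA_TSC_AUX].msr_num = MSR_IA32_TSC_AUX;
    host[MSR_AREA_TSC_AUX].value = pcpu_id;

    printf("guest entry: msr=0x%x value=%llu; host entry: msr=0x%x value=%llu\n",
           guest[MSR_AREA_TSC_AUX].msr_num,
           (unsigned long long)guest[MSR_AREA_TSC_AUX].value,
           host[MSR_AREA_TSC_AUX].msr_num,
           (unsigned long long)host[MSR_AREA_TSC_AUX].value);
    return 0;
}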
@@ -611,10 +611,6 @@ int prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
         return ret;
     }
 
-    /* init_vmcs is delayed to vcpu vmcs launch first time */
-    /* initialize the vcpu tsc aux */
-    vcpu->msr_tsc_aux_guest = vcpu->vcpu_id;
-
     set_pcpu_used(pcpu_id);
 
     INIT_LIST_HEAD(&vcpu->run_list);
@@ -132,6 +132,14 @@ static void intercept_x2apic_msrs(uint8_t *msr_bitmap_arg, enum rw_mode mode)
     }
 }
 
+static void init_msr_area(struct acrn_vcpu *vcpu)
+{
+    vcpu->arch.msr_area.guest[MSR_AREA_TSC_AUX].msr_num = MSR_IA32_TSC_AUX;
+    vcpu->arch.msr_area.guest[MSR_AREA_TSC_AUX].value = vcpu->vcpu_id;
+    vcpu->arch.msr_area.host[MSR_AREA_TSC_AUX].msr_num = MSR_IA32_TSC_AUX;
+    vcpu->arch.msr_area.host[MSR_AREA_TSC_AUX].value = vcpu->pcpu_id;
+}
+
 void init_msr_emulation(struct acrn_vcpu *vcpu)
 {
     uint32_t i;
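
With the guest entry programmed to vcpu_id and the host entry to pcpu_id, RDTSCP (and RDPID) executed inside the guest report the virtual CPU number while hypervisor code keeps seeing the physical CPU number, with no MSR intercept involved. A minimal guest-side illustration follows; it assumes a GCC/Clang x86_64 build and that RDTSCP is enabled for the guest, and it is not ACRN code.

#include <stdio.h>
#include <x86intrin.h>

int main(void)
{
    unsigned int tsc_aux = 0U;
    /*
     * RDTSCP returns the TSC in EDX:EAX and IA32_TSC_AUX in ECX.
     * Inside an ACRN guest with this patch applied, the value is the
     * vcpu_id that init_msr_area() placed in the VM-entry MSR-load area;
     * on bare metal it is whatever the OS programmed (on Linux, typically
     * the CPU/node encoding used for getcpu).
     */
    unsigned long long tsc = __rdtscp(&tsc_aux);

    printf("TSC=%llu IA32_TSC_AUX=%u\n", tsc, tsc_aux);
    return 0;
}
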
@@ -182,6 +190,9 @@ void init_msr_emulation(struct acrn_vcpu *vcpu)
     value64 = hva2hpa(vcpu->vm->arch_vm.msr_bitmap);
     exec_vmwrite64(VMX_MSR_BITMAP_FULL, value64);
     pr_dbg("VMX_MSR_BITMAP: 0x%016llx ", value64);
+
+    /* Initialize the MSR save/store area */
+    init_msr_area(vcpu);
 }
 
 int rdmsr_vmexit_handler(struct acrn_vcpu *vcpu)
@@ -259,11 +270,6 @@ int rdmsr_vmexit_handler(struct acrn_vcpu *vcpu)
         v = vmx_rdmsr_pat(vcpu);
         break;
     }
-    case MSR_IA32_TSC_AUX:
-    {
-        v = vcpu->arch.msr_tsc_aux;
-        break;
-    }
     case MSR_IA32_APIC_BASE:
     {
         /* Read APIC base */
@@ -396,11 +402,6 @@ int wrmsr_vmexit_handler(struct acrn_vcpu *vcpu)
         exec_vmwrite(VMX_GUEST_GS_BASE, v);
         break;
     }
-    case MSR_IA32_TSC_AUX:
-    {
-        vcpu->arch.msr_tsc_aux = v;
-        break;
-    }
     case MSR_IA32_APIC_BASE:
     {
         err = vlapic_wrmsr(vcpu, msr, v);
@@ -956,7 +956,7 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
     exec_vmwrite(VMX_CR3_TARGET_3, 0UL);
 }
 
-static void init_entry_ctrl(__unused const struct acrn_vcpu *vcpu)
+static void init_entry_ctrl(const struct acrn_vcpu *vcpu)
 {
     uint32_t value32;
 
@@ -985,7 +985,8 @@ static void init_entry_ctrl(__unused const struct acrn_vcpu *vcpu)
      * MSRs on load from memory on VM entry from mem address provided by
      * VM-entry MSR load address field
      */
-    exec_vmwrite32(VMX_ENTRY_MSR_LOAD_COUNT, 0U);
+    exec_vmwrite32(VMX_ENTRY_MSR_LOAD_COUNT, MSR_AREA_COUNT);
+    exec_vmwrite64(VMX_ENTRY_MSR_LOAD_ADDR_FULL, (uint64_t)vcpu->arch.msr_area.guest);
 
     /* Set up VM entry interrupt information field pg 2909 24.8.3 */
     exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, 0U);
@@ -997,7 +998,7 @@ static void init_entry_ctrl(__unused const struct acrn_vcpu *vcpu)
     exec_vmwrite32(VMX_ENTRY_INSTR_LENGTH, 0U);
 }
 
-static void init_exit_ctrl(void)
+static void init_exit_ctrl(struct acrn_vcpu *vcpu)
 {
     uint32_t value32;
 
@@ -1029,8 +1030,10 @@ static void init_exit_ctrl(void)
      * The 64 bit VM-exit MSR store and load address fields provide the
      * corresponding addresses
      */
-    exec_vmwrite32(VMX_EXIT_MSR_STORE_COUNT, 0U);
-    exec_vmwrite32(VMX_EXIT_MSR_LOAD_COUNT, 0U);
+    exec_vmwrite32(VMX_EXIT_MSR_STORE_COUNT, MSR_AREA_COUNT);
+    exec_vmwrite32(VMX_EXIT_MSR_LOAD_COUNT, MSR_AREA_COUNT);
+    exec_vmwrite64(VMX_EXIT_MSR_STORE_ADDR_FULL, (uint64_t)vcpu->arch.msr_area.guest);
+    exec_vmwrite64(VMX_EXIT_MSR_LOAD_ADDR_FULL, (uint64_t)vcpu->arch.msr_area.host);
 }
 
 /**
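
Conceptually, the counts and addresses written above tell the CPU to store the guest's current IA32_TSC_AUX into the exit-store area and reload the host value from the exit-load area on every VM exit, with the mirror-image load from the entry-load area on the next VM entry. The sketch below is a rough software model of that VM-exit sequencing only, under those assumptions; model_rdmsr()/model_wrmsr(), fake_tsc_aux, and the main() driver are stand-ins for this illustration, not hypervisor code.

#include <stdint.h>
#include <stdio.h>

#define MSR_IA32_TSC_AUX    0xC0000103U

struct msr_store_entry {
    uint32_t msr_num;
    uint32_t reserved;
    uint64_t value;
};

/* Single fake register standing in for the physical IA32_TSC_AUX. */
static uint64_t fake_tsc_aux;

static uint64_t model_rdmsr(uint32_t msr)
{
    return (msr == MSR_IA32_TSC_AUX) ? fake_tsc_aux : 0UL;
}

static void model_wrmsr(uint32_t msr, uint64_t val)
{
    if (msr == MSR_IA32_TSC_AUX) {
        fake_tsc_aux = val;
    }
}

/*
 * Rough model of what the exit controls make the CPU do at VM exit:
 * store the guest's live MSR values into the exit-store area, then
 * load the host values from the exit-load area.
 */
static void model_vmexit_msr_switch(struct msr_store_entry *store_area,
                                    const struct msr_store_entry *load_area,
                                    uint32_t count)
{
    uint32_t i;

    for (i = 0U; i < count; i++) {
        store_area[i].value = model_rdmsr(store_area[i].msr_num);
    }
    for (i = 0U; i < count; i++) {
        model_wrmsr(load_area[i].msr_num, load_area[i].value);
    }
}

int main(void)
{
    struct msr_store_entry guest = { MSR_IA32_TSC_AUX, 0U, 0UL };
    struct msr_store_entry host = { MSR_IA32_TSC_AUX, 0U, 3UL };  /* pcpu_id */

    fake_tsc_aux = 1UL;  /* guest was running with vcpu_id = 1 */
    model_vmexit_msr_switch(&guest, &host, 1U);
    printf("saved guest value=%llu, live value now=%llu\n",
           (unsigned long long)guest.value,
           (unsigned long long)fake_tsc_aux);
    return 0;
}
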
@@ -1061,7 +1064,7 @@ void init_vmcs(struct acrn_vcpu *vcpu)
     init_exec_ctrl(vcpu);
     init_guest_state(vcpu);
     init_entry_ctrl(vcpu);
-    init_exit_ctrl();
+    init_exit_ctrl(vcpu);
 }
 
 #ifndef CONFIG_PARTITION_MODE