Mirror of https://github.com/projectacrn/acrn-hypervisor.git
hv: switch IA32_TSC_AUX between host/guest through VM Controls
Currently the guest IA32_TSC_AUX MSR is loaded manually right before VM entry and saved right after VM exit. This patch enables the VM-Entry Control and VM-Exit Control fields to switch MSR IA32_TSC_AUX between host and guest automatically, which keeps vcpu_thread() and struct acrn_vcpu cleaner. It also removes the dead code that intercepted IA32_TSC_AUX.

Tracked-On: #1867
Signed-off-by: Zide Chen <zide.chen@intel.com>
Reviewed-by: Li, Fei1 <fei1.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
parent d0b37f8ea2
commit b627c2c979
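For context: the VM-entry MSR-load and VM-exit MSR-store/load controls used here operate on small in-memory tables whose entry format is fixed by the Intel SDM (Vol. 3, 24.7.2 and 24.8.2). The sketch below is illustrative only, not ACRN code; it shows the 16-byte entry layout that the struct msr_store_entry added by this patch mirrors.

#include <stdint.h>

/*
 * Illustrative layout of one VMX MSR-area entry (Intel SDM Vol. 3,
 * 24.7.2/24.8.2): bits 31:0 hold the MSR index, bits 63:32 are reserved,
 * bits 127:64 hold the MSR value.
 */
struct vmx_msr_entry {
	uint32_t msr_index;
	uint32_t reserved;	/* must be zero */
	uint64_t msr_value;
} __attribute__((aligned(16)));

/* Each entry is 16 bytes; the VMCS count fields tell the CPU how many
 * entries to process on VM entry / VM exit. */
_Static_assert(sizeof(struct vmx_msr_entry) == 16, "entry must be 16 bytes");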
@@ -611,10 +611,6 @@ int prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
 		return ret;
 	}
 
-	/* init_vmcs is delayed to vcpu vmcs launch first time */
-	/* initialize the vcpu tsc aux */
-	vcpu->msr_tsc_aux_guest = vcpu->vcpu_id;
-
 	set_pcpu_used(pcpu_id);
 
 	INIT_LIST_HEAD(&vcpu->run_list);
@@ -132,6 +132,14 @@ static void intercept_x2apic_msrs(uint8_t *msr_bitmap_arg, enum rw_mode mode)
 	}
 }
 
+static void init_msr_area(struct acrn_vcpu *vcpu)
+{
+	vcpu->arch.msr_area.guest[MSR_AREA_TSC_AUX].msr_num = MSR_IA32_TSC_AUX;
+	vcpu->arch.msr_area.guest[MSR_AREA_TSC_AUX].value = vcpu->vcpu_id;
+	vcpu->arch.msr_area.host[MSR_AREA_TSC_AUX].msr_num = MSR_IA32_TSC_AUX;
+	vcpu->arch.msr_area.host[MSR_AREA_TSC_AUX].value = vcpu->pcpu_id;
+}
+
 void init_msr_emulation(struct acrn_vcpu *vcpu)
 {
 	uint32_t i;
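init_msr_area() above seeds the guest entry with vcpu_id and the host entry with pcpu_id, so RDTSCP in the guest reports the virtual CPU ID while the hypervisor keeps seeing the physical one. A minimal guest-side sketch (illustrative, not part of this patch) of reading IA32_TSC_AUX with RDTSCP:

#include <stdint.h>

/* Guest-side read of IA32_TSC_AUX via RDTSCP: EDX:EAX receive the TSC,
 * ECX receives IA32_TSC_AUX, which this patch arranges to hold the vCPU ID
 * whenever the guest runs. */
static inline uint64_t rdtscp(uint32_t *aux)
{
	uint32_t lo, hi;

	__asm__ volatile ("rdtscp" : "=a"(lo), "=d"(hi), "=c"(*aux));
	return ((uint64_t)hi << 32U) | lo;
}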
@@ -182,6 +190,9 @@ void init_msr_emulation(struct acrn_vcpu *vcpu)
 	value64 = hva2hpa(vcpu->vm->arch_vm.msr_bitmap);
 	exec_vmwrite64(VMX_MSR_BITMAP_FULL, value64);
 	pr_dbg("VMX_MSR_BITMAP: 0x%016llx ", value64);
+
+	/* Initialize the MSR save/store area */
+	init_msr_area(vcpu);
 }
 
 int rdmsr_vmexit_handler(struct acrn_vcpu *vcpu)
@@ -259,11 +270,6 @@ int rdmsr_vmexit_handler(struct acrn_vcpu *vcpu)
 		v = vmx_rdmsr_pat(vcpu);
 		break;
 	}
-	case MSR_IA32_TSC_AUX:
-	{
-		v = vcpu->arch.msr_tsc_aux;
-		break;
-	}
 	case MSR_IA32_APIC_BASE:
 	{
 		/* Read APIC base */
@@ -396,11 +402,6 @@ int wrmsr_vmexit_handler(struct acrn_vcpu *vcpu)
 		exec_vmwrite(VMX_GUEST_GS_BASE, v);
 		break;
 	}
-	case MSR_IA32_TSC_AUX:
-	{
-		vcpu->arch.msr_tsc_aux = v;
-		break;
-	}
 	case MSR_IA32_APIC_BASE:
 	{
 		err = vlapic_wrmsr(vcpu, msr, v);
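The RDMSR/WRMSR cases deleted in the two hunks above are what the commit message calls dead code: assuming IA32_TSC_AUX is passed through in the MSR bitmap, guest accesses never cause a VM exit, so these handlers never run. A rough sketch of the bitmap lookup the CPU performs (Intel SDM Vol. 3, 24.6.9), assuming the standard 4-KByte layout; msr_read_intercepted() is a hypothetical helper, not an ACRN function:

#include <stdbool.h>
#include <stdint.h>

#define MSR_IA32_TSC_AUX	0xC0000103U

/*
 * Hypothetical helper: returns true if a RDMSR of 'msr' would exit, given a
 * 4-KByte VMX MSR bitmap (SDM Vol. 3, 24.6.9):
 *   bytes    0-1023: read bitmap for MSRs 0x00000000-0x00001FFF
 *   bytes 1024-2047: read bitmap for MSRs 0xC0000000-0xC0001FFF
 *   bytes 2048-3071: write bitmap, low MSRs
 *   bytes 3072-4095: write bitmap, high MSRs
 */
static bool msr_read_intercepted(const uint8_t *bitmap, uint32_t msr)
{
	uint32_t base = (msr >= 0xC0000000U) ? 1024U : 0U;
	uint32_t bit = msr & 0x1FFFU;

	return (bitmap[base + (bit >> 3U)] & (1U << (bit & 0x7U))) != 0U;
}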
@@ -956,7 +956,7 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
 	exec_vmwrite(VMX_CR3_TARGET_3, 0UL);
 }
 
-static void init_entry_ctrl(__unused const struct acrn_vcpu *vcpu)
+static void init_entry_ctrl(const struct acrn_vcpu *vcpu)
 {
 	uint32_t value32;
 
@@ -985,7 +985,8 @@ static void init_entry_ctrl(__unused const struct acrn_vcpu *vcpu)
 	 * MSRs on load from memory on VM entry from mem address provided by
 	 * VM-entry MSR load address field
 	 */
-	exec_vmwrite32(VMX_ENTRY_MSR_LOAD_COUNT, 0U);
+	exec_vmwrite32(VMX_ENTRY_MSR_LOAD_COUNT, MSR_AREA_COUNT);
+	exec_vmwrite64(VMX_ENTRY_MSR_LOAD_ADDR_FULL, (uint64_t)vcpu->arch.msr_area.guest);
 
 	/* Set up VM entry interrupt information field pg 2909 24.8.3 */
 	exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, 0U);
@@ -997,7 +998,7 @@ static void init_entry_ctrl(__unused const struct acrn_vcpu *vcpu)
 	exec_vmwrite32(VMX_ENTRY_INSTR_LENGTH, 0U);
 }
 
-static void init_exit_ctrl(void)
+static void init_exit_ctrl(struct acrn_vcpu *vcpu)
 {
 	uint32_t value32;
 
@@ -1029,8 +1030,10 @@ static void init_exit_ctrl(void)
 	 * The 64 bit VM-exit MSR store and load address fields provide the
 	 * corresponding addresses
 	 */
-	exec_vmwrite32(VMX_EXIT_MSR_STORE_COUNT, 0U);
-	exec_vmwrite32(VMX_EXIT_MSR_LOAD_COUNT, 0U);
+	exec_vmwrite32(VMX_EXIT_MSR_STORE_COUNT, MSR_AREA_COUNT);
+	exec_vmwrite32(VMX_EXIT_MSR_LOAD_COUNT, MSR_AREA_COUNT);
+	exec_vmwrite64(VMX_EXIT_MSR_STORE_ADDR_FULL, (uint64_t)vcpu->arch.msr_area.guest);
+	exec_vmwrite64(VMX_EXIT_MSR_LOAD_ADDR_FULL, (uint64_t)vcpu->arch.msr_area.host);
 }
 
 /**
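Taken together, the entry-control and exit-control hunks above hand the context switch of the listed MSRs to the processor: on VM entry the entries in msr_area.guest are loaded, and on VM exit the current guest values are stored back into msr_area.guest and the entries in msr_area.host are loaded. A toy model of that sequence, purely illustrative (the real work is done by hardware, not by code like this; read_msr()/write_msr() are stand-ins):

#include <stdint.h>

struct msr_entry {
	uint32_t msr_num;
	uint32_t reserved;
	uint64_t value;
};

/* Stand-ins for the actual MSR accesses performed by the CPU. */
extern uint64_t read_msr(uint32_t msr);
extern void write_msr(uint32_t msr, uint64_t value);

static void on_vm_entry(const struct msr_entry *guest, uint32_t count)
{
	/* VM-entry MSR-load area: install the guest's values */
	for (uint32_t i = 0U; i < count; i++) {
		write_msr(guest[i].msr_num, guest[i].value);
	}
}

static void on_vm_exit(struct msr_entry *guest, const struct msr_entry *host, uint32_t count)
{
	/* VM-exit MSR-store area: capture what the guest left behind */
	for (uint32_t i = 0U; i < count; i++) {
		guest[i].value = read_msr(guest[i].msr_num);
	}
	/* VM-exit MSR-load area: restore the host's values */
	for (uint32_t i = 0U; i < count; i++) {
		write_msr(host[i].msr_num, host[i].value);
	}
}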
@@ -1061,7 +1064,7 @@ void init_vmcs(struct acrn_vcpu *vcpu)
 	init_exec_ctrl(vcpu);
 	init_guest_state(vcpu);
 	init_entry_ctrl(vcpu);
-	init_exit_ctrl();
+	init_exit_ctrl(vcpu);
 }
 
 #ifndef CONFIG_PARTITION_MODE
@@ -20,7 +20,6 @@ static void run_vcpu_pre_work(struct acrn_vcpu *vcpu)
 void vcpu_thread(struct acrn_vcpu *vcpu)
 {
 	uint32_t basic_exit_reason = 0U;
-	uint64_t tsc_aux_hyp_cpu = (uint64_t) vcpu->pcpu_id;
 	int32_t ret = 0;
 
 	/* If vcpu is not launched, we need to do init_vmcs first */
@@ -62,12 +61,6 @@ void vcpu_thread(struct acrn_vcpu *vcpu)
 
 		profiling_vmenter_handler(vcpu);
 
-		/* Restore guest TSC_AUX */
-		if (vcpu->launched) {
-			cpu_msr_write(MSR_IA32_TSC_AUX,
-					vcpu->msr_tsc_aux_guest);
-		}
-
 		ret = run_vcpu(vcpu);
 		if (ret != 0) {
 			pr_fatal("vcpu resume failed");
@@ -76,10 +69,6 @@ void vcpu_thread(struct acrn_vcpu *vcpu)
 		}
 
 		vcpu->arch.nrexits++;
-		/* Save guest TSC_AUX */
-		cpu_msr_read(MSR_IA32_TSC_AUX, &vcpu->msr_tsc_aux_guest);
-		/* Restore native TSC_AUX */
-		cpu_msr_write(MSR_IA32_TSC_AUX, tsc_aux_hyp_cpu);
 
 		CPU_IRQ_ENABLE();
 		/* Dispatch handler */
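The lines removed above were the manual per-exit swap of IA32_TSC_AUX via cpu_msr_read()/cpu_msr_write(). Those are thin wrappers around RDMSR/WRMSR; a rough sketch of what such wrappers typically look like (an assumption about their shape, not ACRN's exact code):

#include <stdint.h>

/* RDMSR/WRMSR take the MSR index in ECX and the value in EDX:EAX. */
static inline void cpu_msr_read(uint32_t msr, uint64_t *val)
{
	uint32_t lo, hi;

	__asm__ volatile ("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
	*val = ((uint64_t)hi << 32U) | lo;
}

static inline void cpu_msr_write(uint32_t msr, uint64_t val)
{
	__asm__ volatile ("wrmsr" : : "c"(msr), "a"((uint32_t)val), "d"((uint32_t)(val >> 32U)));
}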
@@ -176,6 +176,23 @@ struct cpu_context {
 	struct ext_context ext_ctx;
 };
 
+/* Intel SDM 24.8.2, the address must be 16-byte aligned */
+struct msr_store_entry {
+	uint32_t msr_num;
+	uint32_t reserved;
+	uint64_t value;
+} __aligned(16);
+
+enum {
+	MSR_AREA_TSC_AUX = 0,
+	MSR_AREA_COUNT,
+};
+
+struct msr_store_area {
+	struct msr_store_entry guest[MSR_AREA_COUNT];
+	struct msr_store_entry host[MSR_AREA_COUNT];
+};
+
 struct acrn_vcpu_arch {
 	/* vmcs region for this vcpu, MUST be 4KB-aligned */
 	uint8_t vmcs[CPU_PAGE_SIZE];
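A note on the layout above: adding another automatically switched MSR only needs a new enum member before MSR_AREA_COUNT and matching assignments in init_msr_area(); the counts written into the VMCS then cover it with no further changes. A hypothetical sketch reusing the structures from this hunk (MSR_AREA_EXAMPLE and MSR_IA32_EXAMPLE are made-up names, not part of the patch):

/* Hypothetical extension, for illustration only. */
enum {
	MSR_AREA_TSC_AUX = 0,
	MSR_AREA_EXAMPLE,	/* made-up slot */
	MSR_AREA_COUNT,		/* both VMCS count fields pick this up */
};

static void init_msr_area(struct acrn_vcpu *vcpu)
{
	vcpu->arch.msr_area.guest[MSR_AREA_TSC_AUX].msr_num = MSR_IA32_TSC_AUX;
	vcpu->arch.msr_area.guest[MSR_AREA_TSC_AUX].value = vcpu->vcpu_id;
	vcpu->arch.msr_area.host[MSR_AREA_TSC_AUX].msr_num = MSR_IA32_TSC_AUX;
	vcpu->arch.msr_area.host[MSR_AREA_TSC_AUX].value = vcpu->pcpu_id;

	/* Hypothetical second entry: MSR_IA32_EXAMPLE is a placeholder name. */
	vcpu->arch.msr_area.guest[MSR_AREA_EXAMPLE].msr_num = MSR_IA32_EXAMPLE;
	vcpu->arch.msr_area.guest[MSR_AREA_EXAMPLE].value = 0UL;
	vcpu->arch.msr_area.host[MSR_AREA_EXAMPLE].msr_num = MSR_IA32_EXAMPLE;
	vcpu->arch.msr_area.host[MSR_AREA_EXAMPLE].value = 0UL;
}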
@@ -199,9 +216,6 @@ struct acrn_vcpu_arch {
 	uint32_t irq_window_enabled;
 	uint32_t nrexits;
 
-	/* Auxiliary TSC value */
-	uint64_t msr_tsc_aux;
-
 	/* VCPU context state information */
 	uint32_t exit_reason;
 	uint32_t idt_vectoring_info;
@@ -217,6 +231,8 @@ struct acrn_vcpu_arch {
 	bool inject_event_pending;
 	struct event_injection_info inject_info;
 
+	/* List of MSRS to be stored and loaded on VM exits or VM entries */
+	struct msr_store_area msr_area;
 } __aligned(CPU_PAGE_SIZE);
 
 struct acrn_vm;
@@ -242,14 +258,6 @@ struct acrn_vcpu {
 
 	struct io_request req; /* used by io/ept emulation */
 
-	/* save guest msr tsc aux register.
-	 * Before VMENTRY, save guest MSR_TSC_AUX to this fields.
-	 * After VMEXIT, restore this fields to guest MSR_TSC_AUX.
-	 * This is only temperary workaround. Once MSR emulation
-	 * is enabled, we should remove this fields and related
-	 * code.
-	 */
-	uint64_t msr_tsc_aux_guest;
 	uint64_t guest_msrs[IDX_MAX_MSR];
 #ifdef CONFIG_MTRR_ENABLED
 	struct mtrr_state mtrr;