hv: fix a bug about host/guest msr store/load

Unify the handling of the host/guest MSR areas in the VMCS. Remove the enum
values that were used as element indexes when several MSRs are present in the
host/guest area, because an index can shift if one of the elements is unused.
Instead, use a variable to record the index that will actually be used.

Tracked-On: #6966
Acked-by: Eddie Dong <eddie.dong@intel.com>
Signed-off-by: Minggui Cao <minggui.cao@intel.com>
This commit is contained in:
Minggui Cao 2022-03-15 22:19:26 +08:00 committed by acrnsi-robot
parent 9be4a282c4
commit 05ca1d7641
3 changed files with 16 additions and 16 deletions

View File

@ -409,10 +409,10 @@ int32_t write_vclosid(struct acrn_vcpu *vcpu, uint64_t val)
* Write the new pCLOSID value to the guest msr area
*
* The prepare_auto_msr_area() function has already initialized the vcpu->arch.msr_area.
* Here we only need to update the vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].value field,
* Here we only need to update the vcpu->arch.msr_area.guest[].value field for IA32_PQR_ASSOC,
* all other vcpu->arch.msr_area fields remains unchanged at runtime.
*/
vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].value = clos2pqr_msr(pclosid);
vcpu->arch.msr_area.guest[vcpu->arch.msr_area.index_of_pqr_assoc].value = clos2pqr_msr(pclosid);
ret = 0;
}

View File

@ -333,10 +333,10 @@ static void prepare_auto_msr_area(struct acrn_vcpu *vcpu)
/* in HV, disable perf/PMC counting, just count in guest VM */
if (is_pmu_pt_configured(vcpu->vm)) {
vcpu->arch.msr_area.guest[MSR_AREA_PERF_CTRL].msr_index = MSR_IA32_PERF_GLOBAL_CTRL;
vcpu->arch.msr_area.guest[MSR_AREA_PERF_CTRL].value = 0;
vcpu->arch.msr_area.host[MSR_AREA_PERF_CTRL].msr_index = MSR_IA32_PERF_GLOBAL_CTRL;
vcpu->arch.msr_area.host[MSR_AREA_PERF_CTRL].value = 0;
vcpu->arch.msr_area.guest[vcpu->arch.msr_area.count].msr_index = MSR_IA32_PERF_GLOBAL_CTRL;
vcpu->arch.msr_area.guest[vcpu->arch.msr_area.count].value = 0;
vcpu->arch.msr_area.host[vcpu->arch.msr_area.count].msr_index = MSR_IA32_PERF_GLOBAL_CTRL;
vcpu->arch.msr_area.host[vcpu->arch.msr_area.count].value = 0;
vcpu->arch.msr_area.count++;
}
@ -352,16 +352,19 @@ static void prepare_auto_msr_area(struct acrn_vcpu *vcpu)
* vCAT: always load/restore MSR_IA32_PQR_ASSOC
*/
if (is_vcat_configured(vcpu->vm) || (vcpu_clos != hv_clos)) {
vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].msr_index = MSR_IA32_PQR_ASSOC;
vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].value = clos2pqr_msr(vcpu_clos);
vcpu->arch.msr_area.host[MSR_AREA_IA32_PQR_ASSOC].msr_index = MSR_IA32_PQR_ASSOC;
vcpu->arch.msr_area.host[MSR_AREA_IA32_PQR_ASSOC].value = clos2pqr_msr(hv_clos);
vcpu->arch.msr_area.guest[vcpu->arch.msr_area.count].msr_index = MSR_IA32_PQR_ASSOC;
vcpu->arch.msr_area.guest[vcpu->arch.msr_area.count].value = clos2pqr_msr(vcpu_clos);
vcpu->arch.msr_area.host[vcpu->arch.msr_area.count].msr_index = MSR_IA32_PQR_ASSOC;
vcpu->arch.msr_area.host[vcpu->arch.msr_area.count].value = clos2pqr_msr(hv_clos);
vcpu->arch.msr_area.index_of_pqr_assoc = vcpu->arch.msr_area.count;
vcpu->arch.msr_area.count++;
pr_acrnlog("switch clos for VM %u vcpu_id %u, host 0x%x, guest 0x%x",
vcpu->vm->vm_id, vcpu->vcpu_id, hv_clos, vcpu_clos);
}
}
ASSERT(vcpu->arch.msr_area.count <= MSR_AREA_COUNT, "error, please check MSR_AREA_COUNT!");
}
/**
@ -386,7 +389,7 @@ void init_emulated_msrs(struct acrn_vcpu *vcpu)
#ifdef CONFIG_VCAT_ENABLED
/*
* init_vcat_msrs() will overwrite the vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].value
* init_vcat_msrs() will overwrite the vcpu->arch.msr_area.guest[].value for MSR_IA32_PQR_ASSOC
* set by prepare_auto_msr_area()
*/
init_vcat_msrs(vcpu);

View File

@ -212,15 +212,12 @@ struct msr_store_entry {
uint64_t value;
} __aligned(16);
enum {
MSR_AREA_IA32_PQR_ASSOC = 0,
MSR_AREA_PERF_CTRL,
MSR_AREA_COUNT,
};
#define MSR_AREA_COUNT 2 /* the max MSRs in auto load/store area */
/*
 * Auto MSR load/store area attached to a vCPU's VMCS.
 *
 * guest[]/host[] are populated at init time (see prepare_auto_msr_area());
 * entries are appended in whatever order the relevant features are enabled,
 * so a given MSR has no fixed slot. index_of_pqr_assoc therefore records
 * the slot assigned to MSR_IA32_PQR_ASSOC so it can be updated at runtime
 * (e.g. by write_vclosid()) without searching the array.
 */
struct msr_store_area {
	struct msr_store_entry guest[MSR_AREA_COUNT];	/* MSRs loaded on VM-entry / stored on VM-exit */
	struct msr_store_entry host[MSR_AREA_COUNT];	/* MSRs restored for the hypervisor on VM-exit */
	uint32_t index_of_pqr_assoc;	/* slot of MSR_IA32_PQR_ASSOC in guest[]/host[]; valid only if that MSR was added */
	uint32_t count; /* actual count of entries to be loaded/restored during VMEntry/VMExit */
};