hv: cat: isolate hypervisor from rtvm

Currently, the CLOS ID used by the CPU cores in VMX root mode is the same as
the one used in non-root mode. For an RTVM, if the hypervisor shares that
CLOS ID with non-root mode, cache lines may be polluted by hypervisor code
executed on VM exit.

This patch adds hv_clos to vm_configurations.c. The hypervisor initializes
the CLOS setting according to hv_clos while the physical CPU cores are
initialized. For an RTVM, the VMX MSR auto load/store areas are used to
switch between the VMX root-mode and non-root-mode settings.
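
For reference, the CLOS value ends up in bits 63:32 of MSR_IA32_PQR_ASSOC
(the low bits carry the RMID), which is what the MSR auto load/store entries
switch. A minimal sketch of that encoding follows; the helper name is
hypothetical, since the definition of the patch's own clos2prq_msr() is not
part of this diff:

#include <stdint.h>

#define PQR_ASSOC_CLOS_SHIFT	32U

/* Hypothetical stand-in for clos2prq_msr(): build the IA32_PQR_ASSOC
 * value for a given class of service, leaving the RMID field (bits 9:0)
 * at zero.
 */
static inline uint64_t clos_to_pqr_assoc(uint16_t clos)
{
	return (uint64_t)clos << PQR_ASSOC_CLOS_SHIFT;
}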

Tracked-On: #2462
Signed-off-by: Binbin Wu <binbin.wu@intel.com>
Reviewed-by: Eddie Dong <eddie.dong@intel.com>
commit cd1ae7a89e (parent 38ca8db19f)
Author:    Binbin Wu
Date:      2019-08-13 08:51:15 +00:00
Committed: ACRN System Integration

7 changed files with 42 additions and 13 deletions

@@ -758,21 +758,11 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
 	int32_t ret;
 	struct acrn_vcpu *vcpu = NULL;
 	char thread_name[16];
-	uint64_t orig_val, final_val;
-	struct acrn_vm_config *conf;
 
 	ret = create_vcpu(pcpu_id, vm, &vcpu);
 	if (ret == 0) {
 		set_pcpu_used(pcpu_id);
 
-		/* Update CLOS for this CPU */
-		if (cat_cap_info.enabled) {
-			conf = get_vm_config(vm->vm_id);
-			orig_val = msr_read(MSR_IA32_PQR_ASSOC);
-			final_val = (orig_val & 0xffffffffUL) | (((uint64_t)conf->clos) << 32UL);
-			msr_write_pcpu(MSR_IA32_PQR_ASSOC, final_val, pcpu_id);
-		}
-
 		INIT_LIST_HEAD(&vcpu->sched_obj.run_list);
 		snprintf(thread_name, 16U, "vm%hu:vcpu%hu", vm->vm_id, vcpu->vcpu_id);
 		(void)strncpy_s(vcpu->sched_obj.name, 16U, thread_name, 16U);
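
The per-vCPU CLOS programming removed above is replaced by a one-time switch
to hv_clos on every physical core during CPU initialization, as the commit
message describes. That code lives in one of the changed files not shown in
this excerpt; a hedged sketch of the idea, reusing only identifiers that
appear elsewhere in this diff (the function name itself is illustrative):

/* Sketch only: switch the current physical core to the hypervisor CLOS. */
static void set_hv_clos_on_current_pcpu(void)
{
	uint64_t val;

	if (cat_cap_info.enabled) {
		val = msr_read(MSR_IA32_PQR_ASSOC);
		/* keep the RMID (low 32 bits), set CLOS (bits 63:32) to hv_clos */
		val = (val & 0xffffffffUL) | ((uint64_t)hv_clos << 32UL);
		msr_write(MSR_IA32_PQR_ASSOC, val);
	}
}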

@@ -451,7 +451,7 @@ static void init_entry_ctrl(const struct acrn_vcpu *vcpu)
 	 * MSRs on load from memory on VM entry from mem address provided by
 	 * VM-entry MSR load address field
 	 */
-	exec_vmwrite32(VMX_ENTRY_MSR_LOAD_COUNT, MSR_AREA_COUNT);
+	exec_vmwrite32(VMX_ENTRY_MSR_LOAD_COUNT, vcpu->arch.msr_area.count);
 	exec_vmwrite64(VMX_ENTRY_MSR_LOAD_ADDR_FULL, hva2hpa((void *)vcpu->arch.msr_area.guest));
 
 	/* Set up VM entry interrupt information field pg 2909 24.8.3 */
@@ -493,8 +493,8 @@ static void init_exit_ctrl(const struct acrn_vcpu *vcpu)
 	 * The 64 bit VM-exit MSR store and load address fields provide the
 	 * corresponding addresses
 	 */
-	exec_vmwrite32(VMX_EXIT_MSR_STORE_COUNT, MSR_AREA_COUNT);
-	exec_vmwrite32(VMX_EXIT_MSR_LOAD_COUNT, MSR_AREA_COUNT);
+	exec_vmwrite32(VMX_EXIT_MSR_STORE_COUNT, vcpu->arch.msr_area.count);
+	exec_vmwrite32(VMX_EXIT_MSR_LOAD_COUNT, vcpu->arch.msr_area.count);
 	exec_vmwrite64(VMX_EXIT_MSR_STORE_ADDR_FULL, hva2hpa((void *)vcpu->arch.msr_area.guest));
 	exec_vmwrite64(VMX_EXIT_MSR_LOAD_ADDR_FULL, hva2hpa((void *)vcpu->arch.msr_area.host));
 }
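
The hard-coded MSR_AREA_COUNT is replaced by vcpu->arch.msr_area.count
because the number of auto-switched MSRs now varies per vCPU: one entry
(IA32_TSC_AUX) for ordinary VMs, two when the guest's CLOS differs from
hv_clos. The bookkeeping implied by the field names in this diff looks
roughly like the sketch below; the 16-byte entry layout follows the VMX
MSR load/store area format, and the reserved field plus exact types are
assumptions:

#define MSR_AREA_TSC_AUX		0U
#define MSR_AREA_IA32_PQR_ASSOC		1U
#define MSR_AREA_COUNT			2U	/* capacity, no longer written to the VMCS directly */

struct msr_store_entry {
	uint32_t msr_index;	/* MSR address */
	uint32_t reserved;
	uint64_t value;		/* value loaded/stored on VM entry/exit */
} __aligned(16);

struct msr_store_area {
	struct msr_store_entry guest[MSR_AREA_COUNT];
	struct msr_store_entry host[MSR_AREA_COUNT];
	uint32_t count;		/* entries actually in use, programmed into the VMCS above */
};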

@@ -15,6 +15,7 @@
 #include <sgx.h>
 #include <guest_pm.h>
 #include <ucode.h>
+#include <cat.h>
 #include <trace.h>
 #include <logmsg.h>
 
@@ -283,10 +284,26 @@ static void intercept_x2apic_msrs(uint8_t *msr_bitmap_arg, uint32_t mode)
  */
 static void init_msr_area(struct acrn_vcpu *vcpu)
 {
+	struct acrn_vm_config *cfg = get_vm_config(vcpu->vm->vm_id);
+
+	vcpu->arch.msr_area.count = 0U;
+
 	vcpu->arch.msr_area.guest[MSR_AREA_TSC_AUX].msr_index = MSR_IA32_TSC_AUX;
 	vcpu->arch.msr_area.guest[MSR_AREA_TSC_AUX].value = vcpu->vcpu_id;
 	vcpu->arch.msr_area.host[MSR_AREA_TSC_AUX].msr_index = MSR_IA32_TSC_AUX;
 	vcpu->arch.msr_area.host[MSR_AREA_TSC_AUX].value = vcpu->pcpu_id;
+	vcpu->arch.msr_area.count++;
+
+	/* only load/restore MSR IA32_PQR_ASSOC when hv and guest have different settings */
+	if (cat_cap_info.enabled && (cfg->clos != hv_clos)) {
+		vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].msr_index = MSR_IA32_PQR_ASSOC;
+		vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].value = clos2prq_msr(cfg->clos);
+		vcpu->arch.msr_area.host[MSR_AREA_IA32_PQR_ASSOC].msr_index = MSR_IA32_PQR_ASSOC;
+		vcpu->arch.msr_area.host[MSR_AREA_IA32_PQR_ASSOC].value = clos2prq_msr(hv_clos);
+		vcpu->arch.msr_area.count++;
+		pr_acrnlog("switch clos for VM %u vcpu_id %u, host 0x%x, guest 0x%x",
+			vcpu->vm->vm_id, vcpu->vcpu_id, hv_clos, cfg->clos);
+	}
 }
 
 /**
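
With these entries in place, the processor loads the host-list value
(hv_clos) into IA32_PQR_ASSOC on every VM exit and the guest-list value
(cfg->clos) on VM entry, while VMs whose CLOS equals hv_clos skip the extra
MSR switch entirely. hv_clos itself is defined in vm_configurations.c and
made visible through the newly included cat.h, per the commit message;
neither file's content is shown in this excerpt, so the following is only a
plausible shape, with every type and prototype here being a guess:

/* cat.h (sketch) */
extern uint16_t hv_clos;		/* CLOS used while running in VMX root mode */
uint64_t clos2prq_msr(uint16_t clos);	/* IA32_PQR_ASSOC value for a given CLOS */

/* vm_configurations.c (sketch) */
uint16_t hv_clos = 0U;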