diff --git a/hypervisor/arch/x86/configs/vm_config.c b/hypervisor/arch/x86/configs/vm_config.c
index 3aafe0f7f..1470497e2 100644
--- a/hypervisor/arch/x86/configs/vm_config.c
+++ b/hypervisor/arch/x86/configs/vm_config.c
@@ -96,6 +96,23 @@ static bool check_vm_uuid_collision(uint16_t vm_id)
 	return ret;
 }
 
+static bool check_vm_clos_config(uint16_t vm_id)
+{
+	uint16_t i;
+	bool ret = true;
+	struct acrn_vm_config *vm_config = get_vm_config(vm_id);
+
+	for (i = 0U; i < vm_config->vcpu_num; i++) {
+		if (vm_config->clos[i] >= platform_clos_num) {
+			pr_err("vm%u: vcpu%u clos(%u) exceed the max clos(%u).",
+				vm_id, i, vm_config->clos[i], platform_clos_num);
+			ret = false;
+			break;
+		}
+	}
+	return ret;
+}
+
 /**
  * @pre vm_config != NULL
  */
@@ -192,11 +209,8 @@ bool sanitize_vm_config(void)
 			break;
 		}
 
-		if (ret &&
-		    is_platform_rdt_capable() &&
-		    (vm_config->clos >= platform_clos_num)) {
-			pr_err("%s set wrong CLOS. Please set below %d\n", __func__, platform_clos_num);
-			ret = false;
+		if (ret && is_platform_rdt_capable()) {
+			ret = check_vm_clos_config(vm_id);
 		}
 
 		if (ret &&
diff --git a/hypervisor/arch/x86/guest/vmsr.c b/hypervisor/arch/x86/guest/vmsr.c
index d77c5933b..b8961050d 100644
--- a/hypervisor/arch/x86/guest/vmsr.c
+++ b/hypervisor/arch/x86/guest/vmsr.c
@@ -292,6 +292,7 @@ static void intercept_x2apic_msrs(uint8_t *msr_bitmap_arg, uint32_t mode)
 static void init_msr_area(struct acrn_vcpu *vcpu)
 {
 	struct acrn_vm_config *cfg = get_vm_config(vcpu->vm->vm_id);
+	uint16_t vcpu_clos = cfg->clos[vcpu->vcpu_id];
 
 	vcpu->arch.msr_area.count = 0U;
 
@@ -302,14 +303,14 @@ static void init_msr_area(struct acrn_vcpu *vcpu)
 	vcpu->arch.msr_area.count++;
 
 	/* only load/restore MSR IA32_PQR_ASSOC when hv and guest have differnt settings */
-	if (is_platform_rdt_capable() && (cfg->clos != hv_clos)) {
+	if (is_platform_rdt_capable() && (vcpu_clos != hv_clos)) {
 		vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].msr_index = MSR_IA32_PQR_ASSOC;
-		vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].value = clos2pqr_msr(cfg->clos);
+		vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].value = clos2pqr_msr(vcpu_clos);
 		vcpu->arch.msr_area.host[MSR_AREA_IA32_PQR_ASSOC].msr_index = MSR_IA32_PQR_ASSOC;
 		vcpu->arch.msr_area.host[MSR_AREA_IA32_PQR_ASSOC].value = clos2pqr_msr(hv_clos);
 		vcpu->arch.msr_area.count++;
 		pr_acrnlog("switch clos for VM %u vcpu_id %u, host 0x%x, guest 0x%x",
-			vcpu->vm->vm_id, vcpu->vcpu_id, hv_clos, cfg->clos);
+			vcpu->vm->vm_id, vcpu->vcpu_id, hv_clos, vcpu_clos);
 	}
 }
diff --git a/hypervisor/include/arch/x86/vm_config.h b/hypervisor/include/arch/x86/vm_config.h
index 27e820774..49f2dbb43 100644
--- a/hypervisor/include/arch/x86/vm_config.h
+++ b/hypervisor/include/arch/x86/vm_config.h
@@ -130,7 +130,7 @@ struct acrn_vm_config {
 	uint16_t pci_dev_num;			/* indicate how many PCI devices in VM */
 	struct acrn_vm_pci_dev_config *pci_devs;	/* point to PCI devices BDF list */
 	struct acrn_vm_os_config os_config;	/* OS information the VM */
-	uint16_t clos;				/* Class of Service, effective only if CONFIG_CAT_ENABLED
+	uint16_t clos[MAX_VCPUS_PER_VM];	/* Class of Service, effective only if CONFIG_RDT_ENABLED
 						 * is defined on CAT capable platforms
 						 */
diff --git a/hypervisor/scenarios/hybrid/vm_configurations.c b/hypervisor/scenarios/hybrid/vm_configurations.c
index 82775efcc..663285623 100644
--- a/hypervisor/scenarios/hybrid/vm_configurations.c
+++ b/hypervisor/scenarios/hybrid/vm_configurations.c
@@ -17,7 +17,6 @@ struct acrn_vm_config vm_configs[CONFIG_MAX_VM_NUM] = {
 		.guest_flags = 0UL,
 		.vcpu_num = 1U,
 		.vcpu_affinity = VM0_CONFIG_VCPU_AFFINITY,
-		.clos = 0U,
 		.severity = SEVERITY_SAFETY_VM,
 		.memory = {
 			.start_hpa = VM0_CONFIG_MEM_START_HPA,
@@ -52,7 +51,6 @@ struct acrn_vm_config vm_configs[CONFIG_MAX_VM_NUM] = {
 		/* dbbbd434-7a57-4216-a12c-2201f1ab0240 */
 		.guest_flags = 0UL,
-		.clos = 0U,
 		.severity = SEVERITY_SOS,
 		.memory = {
 			.start_hpa = 0UL,
diff --git a/hypervisor/scenarios/industry/vm_configurations.c b/hypervisor/scenarios/industry/vm_configurations.c
index 685b1e416..5210f43be 100644
--- a/hypervisor/scenarios/industry/vm_configurations.c
+++ b/hypervisor/scenarios/industry/vm_configurations.c
@@ -16,8 +16,8 @@ struct acrn_vm_config vm_configs[CONFIG_MAX_VM_NUM] = {
 			0xa1U, 0x2cU, 0x22U, 0x01U, 0xf1U, 0xabU, 0x02U, 0x40U},
 		/* dbbbd434-7a57-4216-a12c-2201f1ab0240 */
 		.guest_flags = 0UL,
-		.clos = 0U,
 		.severity = SEVERITY_SOS,
+		.clos = { 0U },
 		.memory = {
 			.start_hpa = 0UL,
 			.size = CONFIG_SOS_RAM_SIZE,
diff --git a/hypervisor/scenarios/logical_partition/vm_configurations.c b/hypervisor/scenarios/logical_partition/vm_configurations.c
index 6147459eb..0f3cfb461 100644
--- a/hypervisor/scenarios/logical_partition/vm_configurations.c
+++ b/hypervisor/scenarios/logical_partition/vm_configurations.c
@@ -19,7 +19,6 @@ struct acrn_vm_config vm_configs[CONFIG_MAX_VM_NUM] = {
 		/* 26c5e0d8-8f8a-47d8-8109-f201ebd61a5e */
 		.vcpu_num = 2U,
 		.vcpu_affinity = VM0_CONFIG_VCPU_AFFINITY,
-		.clos = 0U,
 		.memory = {
 			.start_hpa = VM0_CONFIG_MEM_START_HPA,
 			.size = VM0_CONFIG_MEM_SIZE,
@@ -59,7 +58,6 @@ struct acrn_vm_config vm_configs[CONFIG_MAX_VM_NUM] = {
 		.vcpu_num = 2U,
 		.vcpu_affinity = VM1_CONFIG_VCPU_AFFINITY,
 		.guest_flags = (GUEST_FLAG_RT | GUEST_FLAG_LAPIC_PASSTHROUGH),
-		.clos = 0U,
 		.memory = {
 			.start_hpa = VM1_CONFIG_MEM_START_HPA,
 			.size = VM1_CONFIG_MEM_SIZE,
diff --git a/hypervisor/scenarios/sdc/vm_configurations.c b/hypervisor/scenarios/sdc/vm_configurations.c
index 1a5b26dd1..56ffa9949 100644
--- a/hypervisor/scenarios/sdc/vm_configurations.c
+++ b/hypervisor/scenarios/sdc/vm_configurations.c
@@ -18,7 +18,6 @@ struct acrn_vm_config vm_configs[CONFIG_MAX_VM_NUM] = {
 		/* Allow SOS to reboot the host since there is supposed to be the highest severity guest */
 		.guest_flags = 0UL,
-		.clos = 0U,
 		.severity = SEVERITY_SOS,
 		.memory = {
 			.start_hpa = 0UL,
diff --git a/hypervisor/scenarios/sdc2/vm_configurations.c b/hypervisor/scenarios/sdc2/vm_configurations.c
index 03539924c..28506a42b 100644
--- a/hypervisor/scenarios/sdc2/vm_configurations.c
+++ b/hypervisor/scenarios/sdc2/vm_configurations.c
@@ -18,7 +18,6 @@ struct acrn_vm_config vm_configs[CONFIG_MAX_VM_NUM] = {
 		/* Allow SOS to reboot the host since there is supposed to be the highest severity guest */
 		.guest_flags = 0UL,
-		.clos = 0U,
 		.severity = SEVERITY_SOS,
 		.memory = {
 			.start_hpa = 0UL,
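
Note (not part of the patch): below is a minimal, standalone sketch of the per-vCPU CLOS behaviour this change introduces. It mirrors the new clos[MAX_VCPUS_PER_VM] field and the rule enforced by check_vm_clos_config(); the type name vm_config_sketch and the numeric values for MAX_VCPUS_PER_VM and platform_clos_num are stand-ins for illustration only, while in the hypervisor the real check runs from sanitize_vm_config() on RDT-capable platforms.

/*
 * Illustrative sketch only -- not hypervisor code. The array field, the loop
 * bound (vcpu_num) and the "clos[i] < platform_clos_num" rule come from the
 * patch above; the type names and numeric values are assumptions for the demo.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_VCPUS_PER_VM	8U	/* stand-in value, not ACRN's definition */

struct vm_config_sketch {
	uint16_t vcpu_num;
	uint16_t clos[MAX_VCPUS_PER_VM];	/* one Class of Service per vCPU */
};

static const uint16_t platform_clos_num = 4U;	/* stand-in for the platform CLOS count */

/* Same rule as check_vm_clos_config(): every vCPU's CLOS must be below platform_clos_num. */
static bool clos_config_is_valid(const struct vm_config_sketch *cfg)
{
	uint16_t i;

	for (i = 0U; i < cfg->vcpu_num; i++) {
		if (cfg->clos[i] >= platform_clos_num) {
			printf("vcpu%u clos(%u) exceeds the max clos(%u)\n",
				(unsigned int)i, (unsigned int)cfg->clos[i],
				(unsigned int)platform_clos_num);
			return false;
		}
	}
	return true;
}

int main(void)
{
	/* vcpu0 -> CLOS 1, vcpu1 -> CLOS 2; unspecified entries default to 0U */
	struct vm_config_sketch rt_vm = { .vcpu_num = 2U, .clos = { 1U, 2U } };

	printf("per-vCPU CLOS config valid: %s\n", clos_config_is_valid(&rt_vm) ? "yes" : "no");
	return 0;
}

With the array form, each vCPU of a VM can sit in its own cache/bandwidth allocation class, which the old scalar clos could not express; init_msr_area() accordingly loads MSR_IA32_PQR_ASSOC with clos2pqr_msr(cfg->clos[vcpu->vcpu_id]) for each vCPU whose CLOS differs from hv_clos.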