diff --git a/hypervisor/arch/x86/guest/vmsr.c b/hypervisor/arch/x86/guest/vmsr.c
index 25dc74f13..65ba1059f 100644
--- a/hypervisor/arch/x86/guest/vmsr.c
+++ b/hypervisor/arch/x86/guest/vmsr.c
@@ -86,6 +86,17 @@ static uint32_t emulated_guest_msrs[NUM_EMULATED_MSRS] = {
 	MSR_IA32_MPERF,
 	MSR_IA32_APERF,
 
+	/*
+	 * Thermal MSRs:
+	 * CPUID.01H:EDX[22] IA32_THERM_INTERRUPT, IA32_THERM_STATUS, MSR_IA32_CLOCK_MODULATION
+	 * CPUID.06H:EAX[6]  IA32_PACKAGE_THERM_INTERRUPT, IA32_PACKAGE_THERM_STATUS
+	 */
+	MSR_IA32_CLOCK_MODULATION,
+	MSR_IA32_THERM_INTERRUPT,
+	MSR_IA32_THERM_STATUS,
+	MSR_IA32_PACKAGE_THERM_INTERRUPT,
+	MSR_IA32_PACKAGE_THERM_STATUS,
+
 	/* VMX: CPUID.01H.ECX[5] */
 #ifdef CONFIG_NVMX_ENABLED
 	LIST_OF_VMX_MSRS,
@@ -615,6 +626,17 @@ bool is_iwkey_backup_support(struct acrn_vcpu *vcpu)
 	return (ebx & CPUID_EBX_KL_BACKUP_MSR) == CPUID_EBX_KL_BACKUP_MSR;
 }
 
+/**
+ * @pre vcpu != NULL
+ */
+bool is_ecmd_supported(struct acrn_vcpu *vcpu)
+{
+	uint32_t eax = 0x6U, ebx = 0U, ecx = 0U, edx = 0U;
+	/* ECMD: check whether the clock modulation duty cycle extension is supported */
+	guest_cpuid(vcpu, &eax, &ebx, &ecx, &edx);
+	return (eax & CPUID_EAX_ECMD) == CPUID_EAX_ECMD;
+}
+
 /**
  * @pre vcpu != NULL
  */
@@ -652,6 +674,15 @@ int32_t rdmsr_vmexit_handler(struct acrn_vcpu *vcpu)
 		v = vcpu_get_guest_msr(vcpu, MSR_IA32_TSC_ADJUST);
 		break;
 	}
+	case MSR_IA32_CLOCK_MODULATION:
+	case MSR_IA32_THERM_STATUS:
+	case MSR_IA32_THERM_INTERRUPT:
+	case MSR_IA32_PACKAGE_THERM_INTERRUPT:
+	case MSR_IA32_PACKAGE_THERM_STATUS:
+	{
+		v = msr_read(msr);
+		break;
+	}
 	case MSR_IA32_MTRR_CAP:
 	case MSR_IA32_MTRR_DEF_TYPE:
 	case MSR_IA32_MTRR_FIX64K_00000:
@@ -1046,6 +1077,59 @@ int32_t wrmsr_vmexit_handler(struct acrn_vcpu *vcpu)
 		set_guest_tsc(vcpu, v);
 		break;
 	}
+	case MSR_IA32_CLOCK_MODULATION:
+	{
+		if (is_vtm_configured(vcpu->vm)) {
+			/* If extended clock modulation duty (ECMD) is not supported,
+			 * bit 0 is reserved.
+			 */
+			if (is_ecmd_supported(vcpu)) {
+				err = msr_write_safe(msr, v, MSR_IA32_CLOCK_MODULATION_RSV_BITS);
+			} else {
+				err = msr_write_safe(msr, v, MSR_IA32_CLOCK_MODULATION_RSV_BITS |
+						0x1UL);
+			}
+		} else {
+			err = -EACCES;
+		}
+		break;
+	}
+	case MSR_IA32_THERM_STATUS:
+	{
+		if (is_vtm_configured(vcpu->vm)) {
+			err = msr_write_safe(msr, v, MSR_IA32_THERM_STATUS_RSV_BITS);
+		} else {
+			err = -EACCES;
+		}
+		break;
+	}
+	case MSR_IA32_THERM_INTERRUPT:
+	{
+		if (is_vtm_configured(vcpu->vm)) {
+			err = msr_write_safe(msr, v, MSR_IA32_THERM_INTERRUPT_RSV_BITS);
+		} else {
+			err = -EACCES;
+		}
+		break;
+	}
+	case MSR_IA32_PACKAGE_THERM_INTERRUPT:
+	{
+		if (is_vtm_configured(vcpu->vm)) {
+			err = msr_write_safe(msr, v, MSR_IA32_PACKAGE_THERM_INTERRUPT_RSV_BITS);
+		} else {
+			err = -EACCES;
+		}
+		break;
+	}
+	case MSR_IA32_PACKAGE_THERM_STATUS:
+	{
+		if (is_vtm_configured(vcpu->vm)) {
+			err = msr_write_safe(msr, v, MSR_IA32_PACKAGE_THERM_STATUS_RSV_BITS);
+		} else {
+			err = -EACCES;
+		}
+		break;
+	}
 	case MSR_IA32_MTRR_DEF_TYPE:
 	case MSR_IA32_MTRR_FIX64K_00000:
 	case MSR_IA32_MTRR_FIX16K_80000:
diff --git a/hypervisor/include/arch/x86/asm/cpu.h b/hypervisor/include/arch/x86/asm/cpu.h
index 5dacf910f..7028384a1 100755
--- a/hypervisor/include/arch/x86/asm/cpu.h
+++ b/hypervisor/include/arch/x86/asm/cpu.h
@@ -40,6 +40,7 @@
 #include
 #include
 #include
+#include
 
 /* Define CPU stack alignment */
 #define CPU_STACK_ALIGN	16UL
@@ -704,6 +705,17 @@ static inline void msr_write(uint32_t reg_num, uint64_t value64)
 	cpu_msr_write(reg_num, value64);
 }
 
+static inline int32_t msr_write_safe(uint32_t reg_num, uint64_t value64, uint64_t rsvd)
+{
+	int32_t err = 0;
+
+	if ((value64 & rsvd) == 0UL) {
+		msr_write(reg_num, value64);
+	} else {
+		err = -EACCES;
+	}
+	return err;
+}
 
 /* wrmsr/rdmsr smp call data */
 struct msr_data_struct {
diff --git a/hypervisor/include/arch/x86/asm/cpuid.h b/hypervisor/include/arch/x86/asm/cpuid.h
index 70a4d0748..deb156699 100644
--- a/hypervisor/include/arch/x86/asm/cpuid.h
+++ b/hypervisor/include/arch/x86/asm/cpuid.h
@@ -74,6 +74,8 @@
 #define CPUID_EDX_TM1		(1U<<29U)
 #define CPUID_EDX_IA64		(1U<<30U)
 #define CPUID_EDX_PBE		(1U<<31U)
+/* CPUID.06H:EAX.ECMD */
+#define CPUID_EAX_ECMD		(1U<<5U)
 /* CPUID.06H:EAX.HWP */
 #define CPUID_EAX_HWP		(1U<<7U)
 /* CPUID.06H:EAX.HWP_Notification */
diff --git a/hypervisor/include/arch/x86/asm/guest/vcpu.h b/hypervisor/include/arch/x86/asm/guest/vcpu.h
index 7b10dadb8..04ca654d1 100644
--- a/hypervisor/include/arch/x86/asm/guest/vcpu.h
+++ b/hypervisor/include/arch/x86/asm/guest/vcpu.h
@@ -175,7 +175,7 @@ enum reset_mode;
 #define SECURE_WORLD		1
 
 #define NUM_WORLD_MSRS		2U
-#define NUM_COMMON_MSRS		31U
+#define NUM_COMMON_MSRS		36U
 
 #ifdef CONFIG_VCAT_ENABLED
 #define NUM_CAT_L2_MSRS		MAX_CACHE_CLOS_NUM_ENTRIES
diff --git a/hypervisor/include/arch/x86/asm/msr.h b/hypervisor/include/arch/x86/asm/msr.h
index 42d4708c8..c7c9714a2 100644
--- a/hypervisor/include/arch/x86/asm/msr.h
+++ b/hypervisor/include/arch/x86/asm/msr.h
@@ -689,4 +689,11 @@ void update_msr_bitmap_x2apic_passthru(struct acrn_vcpu *vcpu);
 #define MSR_IA32_HWP_REQUEST_RSV_BITS			(0x7FFF80000000000UL)
 #define MSR_IA32_HWP_REQUEST_PKG_CTL			(1UL << 42U)
 
+/* Thermal MSR reserved bits */
+#define MSR_IA32_CLOCK_MODULATION_RSV_BITS		(~0x1FUL)
+#define MSR_IA32_THERM_STATUS_RSV_BITS			(~0xAAAUL)
+#define MSR_IA32_THERM_INTERRUPT_RSV_BITS		(~0x1FFFF1FUL)
+#define MSR_IA32_PACKAGE_THERM_STATUS_RSV_BITS		(~0xAA2UL)
+#define MSR_IA32_PACKAGE_THERM_INTERRUPT_RSV_BITS	(~0x1FFFF17UL)
+
 #endif /* MSR_H */
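
Reviewer note (not part of the patch): the reserved-bit gate added by msr_write_safe() can be exercised in isolation with the user-space sketch below. This is only an illustration under stated assumptions: check_rsvd_write() and fake_msr_write() are hypothetical stand-ins for msr_write_safe() and msr_write() (an actual WRMSR needs ring 0), the mask value is copied from the new defines in asm/msr.h, and 0x19A is assumed to be the architectural IA32_CLOCK_MODULATION address rather than anything introduced here. The last call mirrors the non-ECMD path in wrmsr_vmexit_handler(), where bit 0 is additionally treated as reserved.

#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Reserved-bit mask copied from the patch (asm/msr.h): only bits 4:0 of
 * IA32_CLOCK_MODULATION are writable. 0x19A is the architectural MSR
 * address assumed from the SDM; it is not defined by this patch. */
#define MSR_IA32_CLOCK_MODULATION_RSV_BITS	(~0x1FUL)
#define MSR_IA32_CLOCK_MODULATION		0x19AU

/* Hypothetical stand-in for the hypervisor's msr_write(); WRMSR requires
 * ring 0, so the "write" is only logged here. */
static void fake_msr_write(uint32_t reg_num, uint64_t value64)
{
	printf("WRMSR 0x%" PRIx32 " <- 0x%" PRIx64 "\n", reg_num, value64);
}

/* Same gate as msr_write_safe(): refuse any value that touches a reserved bit. */
static int32_t check_rsvd_write(uint32_t reg_num, uint64_t value64, uint64_t rsvd)
{
	int32_t err = 0;

	if ((value64 & rsvd) == 0UL) {
		fake_msr_write(reg_num, value64);
	} else {
		err = -EACCES;
	}
	return err;
}

int main(void)
{
	/* Bit 4 enables on-demand modulation, bits 3:1 hold the duty-cycle
	 * setting: 0x1E stays inside the writable mask, so the write goes through. */
	int32_t ok = check_rsvd_write(MSR_IA32_CLOCK_MODULATION, 0x1EUL,
			MSR_IA32_CLOCK_MODULATION_RSV_BITS);

	/* Bit 8 is reserved for this MSR, so the write is rejected. */
	int32_t bad = check_rsvd_write(MSR_IA32_CLOCK_MODULATION, 0x100UL,
			MSR_IA32_CLOCK_MODULATION_RSV_BITS);

	/* Without ECMD the patch also treats bit 0 as reserved (mask | 0x1UL),
	 * so 0x1F is rejected even though it fits the base mask. */
	int32_t no_ecmd = check_rsvd_write(MSR_IA32_CLOCK_MODULATION, 0x1FUL,
			MSR_IA32_CLOCK_MODULATION_RSV_BITS | 0x1UL);

	printf("ok=%d bad=%d no_ecmd=%d\n", ok, bad, no_ecmd);
	return 0;
}

Built with a stock C compiler and run in user space, the first write is accepted and the other two return -EACCES, which is the same error code the guest-visible wrmsr path propagates when a reserved bit is set.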