diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c
index 5b5f137f4..2d0d0c238 100644
--- a/hypervisor/arch/x86/guest/vlapic.c
+++ b/hypervisor/arch/x86/guest/vlapic.c
@@ -427,7 +427,9 @@ static void vlapic_icrtmr_write_handler(struct acrn_vlapic *vlapic)
 uint64_t vlapic_get_tsc_deadline_msr(const struct acrn_vlapic *vlapic)
 {
 	uint64_t ret;
-	if (!vlapic_lvtt_tsc_deadline(vlapic)) {
+	if (is_lapic_pt_enabled(vlapic->vcpu)) {
+		ret = msr_read(MSR_IA32_TSC_DEADLINE) + exec_vmread64(VMX_TSC_OFFSET_FULL);
+	} else if (!vlapic_lvtt_tsc_deadline(vlapic)) {
 		ret = 0UL;
 	} else {
 		ret = (vlapic->vtimer.timer.fire_tsc == 0UL) ? 0UL :
@@ -443,7 +445,11 @@ void vlapic_set_tsc_deadline_msr(struct acrn_vlapic *vlapic, uint64_t val_arg)
 	struct hv_timer *timer;
 	uint64_t val = val_arg;
 
-	if (vlapic_lvtt_tsc_deadline(vlapic)) {
+	if (is_lapic_pt_enabled(vlapic->vcpu)) {
+		vcpu_set_guest_msr(vlapic->vcpu, MSR_IA32_TSC_DEADLINE, val);
+		val -= exec_vmread64(VMX_TSC_OFFSET_FULL);
+		msr_write(MSR_IA32_TSC_DEADLINE, val);
+	} else if (vlapic_lvtt_tsc_deadline(vlapic)) {
 		vcpu_set_guest_msr(vlapic->vcpu, MSR_IA32_TSC_DEADLINE, val);
 
 		timer = &vlapic->vtimer.timer;
@@ -461,6 +467,8 @@ void vlapic_set_tsc_deadline_msr(struct acrn_vlapic *vlapic, uint64_t val_arg)
 		} else {
 			timer->fire_tsc = 0UL;
 		}
+	} else {
+		/* No action required */
 	}
 }
 
diff --git a/hypervisor/arch/x86/guest/vmsr.c b/hypervisor/arch/x86/guest/vmsr.c
index c5df9763c..46496ab53 100644
--- a/hypervisor/arch/x86/guest/vmsr.c
+++ b/hypervisor/arch/x86/guest/vmsr.c
@@ -485,6 +485,23 @@ int32_t rdmsr_vmexit_handler(struct acrn_vcpu *vcpu)
 	return err;
 }
 
+/*
+ * If VMX_TSC_OFFSET_FULL is 0, there is no need to trap writes to IA32_TSC_DEADLINE because
+ * there is no offset between the vTSC and the pTSC; in that case only writes to vTSC_ADJUST are trapped.
+ */
+static void set_tsc_msr_intercept(struct acrn_vcpu *vcpu, bool intercept)
+{
+	uint8_t *msr_bitmap = vcpu->arch.msr_bitmap;
+
+	if (!intercept) {
+		enable_msr_interception(msr_bitmap, MSR_IA32_TSC_DEADLINE, INTERCEPT_DISABLE);
+		enable_msr_interception(msr_bitmap, MSR_IA32_TSC_ADJUST, INTERCEPT_WRITE);
+	} else {
+		enable_msr_interception(msr_bitmap, MSR_IA32_TSC_DEADLINE, INTERCEPT_READ_WRITE);
+		enable_msr_interception(msr_bitmap, MSR_IA32_TSC_ADJUST, INTERCEPT_READ_WRITE);
+	}
+}
+
 /*
  * Intel SDM 17.17.3: If an execution of WRMSR to the
  * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the
@@ -514,6 +531,8 @@ static void set_guest_tsc(struct acrn_vcpu *vcpu, uint64_t guest_tsc)
 
 	/* write to VMCS because rdtsc and rdtscp are not intercepted */
 	exec_vmwrite64(VMX_TSC_OFFSET_FULL, tsc_delta);
+
+	set_tsc_msr_intercept(vcpu, tsc_delta != 0UL);
 }
 
 /*
@@ -561,6 +580,8 @@ static void set_guest_tsc_adjust(struct acrn_vcpu *vcpu, uint64_t tsc_adjust)
 
 	/* IA32_TSC_ADJUST is supposed to carry the value it's written to */
 	vcpu_set_guest_msr(vcpu, MSR_IA32_TSC_ADJUST, tsc_adjust);
+
+	set_tsc_msr_intercept(vcpu, (tsc_offset + tsc_adjust_delta) != 0UL);
 }
 
 /**
@@ -788,5 +809,5 @@ void update_msr_bitmap_x2apic_passthru(struct acrn_vcpu *vcpu)
 	enable_msr_interception(msr_bitmap, MSR_IA32_EXT_XAPICID, INTERCEPT_READ);
 	enable_msr_interception(msr_bitmap, MSR_IA32_EXT_APIC_LDR, INTERCEPT_READ);
 	enable_msr_interception(msr_bitmap, MSR_IA32_EXT_APIC_ICR, INTERCEPT_WRITE);
-	enable_msr_interception(msr_bitmap, MSR_IA32_TSC_DEADLINE, INTERCEPT_DISABLE);
+	set_tsc_msr_intercept(vcpu, exec_vmread64(VMX_TSC_OFFSET_FULL) != 0UL);
 }
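
For reference, below is a minimal standalone sketch (not ACRN code) of the guest/host deadline translation performed by the new LAPIC-passthrough branches above: the write path subtracts the VMX TSC offset before programming the physical MSR, and the read path adds it back. The variables tsc_offset and host_tsc_deadline are hypothetical stand-ins for exec_vmread64(VMX_TSC_OFFSET_FULL) and the physical IA32_TSC_DEADLINE register; everything else is plain C for illustration.

/* Illustrative only: guest_tsc = host_tsc + tsc_offset, so a guest deadline D
 * must be programmed into the physical MSR as D - tsc_offset, and a value read
 * back from the physical MSR is reported to the guest as value + tsc_offset.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t tsc_offset;        /* stand-in for VMX_TSC_OFFSET_FULL */
static uint64_t host_tsc_deadline; /* stand-in for the physical IA32_TSC_DEADLINE */

static void guest_write_tsc_deadline(uint64_t guest_val)
{
	/* mirrors the passthrough write path: strip the offset, then "wrmsr" */
	host_tsc_deadline = guest_val - tsc_offset;
}

static uint64_t guest_read_tsc_deadline(void)
{
	/* mirrors the passthrough read path: "rdmsr", then add the offset back */
	return host_tsc_deadline + tsc_offset;
}

int main(void)
{
	tsc_offset = 0x1000ULL;              /* pretend the vTSC runs 0x1000 ahead of the pTSC */
	guest_write_tsc_deadline(0x5000ULL); /* guest arms a deadline at vTSC 0x5000 */

	printf("host deadline  = %#llx\n", (unsigned long long)host_tsc_deadline);         /* 0x4000 */
	printf("guest readback = %#llx\n", (unsigned long long)guest_read_tsc_deadline()); /* 0x5000 */
	return 0;
}

This is also what motivates set_tsc_msr_intercept(): when the offset is zero the translation is a no-op, so IA32_TSC_DEADLINE can be passed through without trapping, and only writes that could change the offset (vTSC_ADJUST) still need to be intercepted.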