From b6aaf1b8d91301a9616d9be0e635406666c0b261 Mon Sep 17 00:00:00 2001 From: Zide Chen Date: Thu, 6 Dec 2018 10:44:06 -0800 Subject: [PATCH] hv: MSRs may need isolation between normal and secure world - implement unified APIs to access guest_msrs[] under struct acrn_vcpu. - use these new APIs to read/write emulated TSC_DEADLINE MSR - switch world_msrs[] and guest_msrs[] during world switch for MSRs that need world isolation - remove the old guest_msrs[] array and its index macros. Tracked-On: #1867 Signed-off-by: Zide Chen Acked-by: Eddie Dong --- hypervisor/arch/x86/guest/vcpu.c | 21 +++++++++++++++++++ hypervisor/arch/x86/guest/vlapic.c | 4 ++-- hypervisor/arch/x86/guest/vmsr.c | 18 ++++++++++++++ hypervisor/arch/x86/trusty.c | 22 ++++++++++++++++++++ hypervisor/include/arch/x86/guest/guest.h | 11 ++-------- hypervisor/include/arch/x86/guest/vcpu.h | 25 ++++++++++++++++++++++- 6 files changed, 89 insertions(+), 12 deletions(-) diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c index dbdb71ba5..a0c6ec6da 100644 --- a/hypervisor/arch/x86/guest/vcpu.c +++ b/hypervisor/arch/x86/guest/vcpu.c @@ -158,6 +158,27 @@ inline void vcpu_set_pat_ext(struct acrn_vcpu *vcpu, uint64_t val) = val; } +uint64_t vcpu_get_guest_msr(const struct acrn_vcpu *vcpu, uint32_t msr) +{ + uint32_t index = vmsr_get_guest_msr_index(msr); + uint64_t val = 0UL; + + if (index < NUM_GUEST_MSRS) { + val = vcpu->arch.guest_msrs[index]; + } + + return val; +} + +void vcpu_set_guest_msr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t val) +{ + uint32_t index = vmsr_get_guest_msr_index(msr); + + if (index < NUM_GUEST_MSRS) { + vcpu->arch.guest_msrs[index] = val; + } +} + struct acrn_vcpu *get_ever_run_vcpu(uint16_t pcpu_id) { return per_cpu(ever_run_vcpu, pcpu_id); diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c index 6c6a5221f..614c22abe 100644 --- a/hypervisor/arch/x86/guest/vlapic.c +++ b/hypervisor/arch/x86/guest/vlapic.c @@ -408,7 
+408,7 @@ static uint64_t vlapic_get_tsc_deadline_msr(const struct acrn_vlapic *vlapic) ret = 0UL; } else { ret = (vlapic->vtimer.timer.fire_tsc == 0UL) ? 0UL : - vlapic->vcpu->guest_msrs[IDX_TSC_DEADLINE]; + vcpu_get_guest_msr(vlapic->vcpu, MSR_IA32_TSC_DEADLINE); } return ret; @@ -422,7 +422,7 @@ static void vlapic_set_tsc_deadline_msr(struct acrn_vlapic *vlapic, uint64_t val = val_arg; if (vlapic_lvtt_tsc_deadline(vlapic)) { - vlapic->vcpu->guest_msrs[IDX_TSC_DEADLINE] = val; + vcpu_set_guest_msr(vlapic->vcpu, MSR_IA32_TSC_DEADLINE, val); timer = &vlapic->vtimer.timer; del_timer(timer); diff --git a/hypervisor/arch/x86/guest/vmsr.c b/hypervisor/arch/x86/guest/vmsr.c index e3ddfa26f..50191b6ff 100644 --- a/hypervisor/arch/x86/guest/vmsr.c +++ b/hypervisor/arch/x86/guest/vmsr.c @@ -201,6 +201,24 @@ static const uint32_t x2apic_msrs[NUM_X2APIC_MSRS] = { MSR_IA32_EXT_APIC_SELF_IPI, }; +/* emulated_guest_msrs[] shares same indexes with array vcpu->arch->guest_msrs[] */ +uint32_t vmsr_get_guest_msr_index(uint32_t msr) +{ + uint32_t index; + + for (index = 0U; index < NUM_GUEST_MSRS; index++) { + if (emulated_guest_msrs[index] == msr) { + break; + } + } + + if (index == NUM_GUEST_MSRS) { + pr_err("%s, MSR %x is not defined in array emulated_guest_msrs[]", __func__, msr); + } + + return index; +} + static void enable_msr_interception(uint8_t *bitmap, uint32_t msr_arg, enum rw_mode mode) { uint8_t *read_map; diff --git a/hypervisor/arch/x86/trusty.c b/hypervisor/arch/x86/trusty.c index ac1509506..828849d62 100644 --- a/hypervisor/arch/x86/trusty.c +++ b/hypervisor/arch/x86/trusty.c @@ -158,6 +158,8 @@ static inline void rstor_fxstore_guest_area(const struct ext_context *ext_ctx) static void save_world_ctx(struct acrn_vcpu *vcpu, struct ext_context *ext_ctx) { + uint32_t i; + /* cache on-demand run_context for efer/rflags/rsp/rip */ (void)vcpu_get_efer(vcpu); (void)vcpu_get_rflags(vcpu); @@ -207,10 +209,17 @@ static void save_world_ctx(struct acrn_vcpu *vcpu, struct 
ext_context *ext_ctx) /* FX area */ save_fxstore_guest_area(ext_ctx); + + /* For MSRs need isolation between worlds */ + for (i = 0U; i < NUM_WORLD_MSRS; i++) { + vcpu->arch.contexts[vcpu->arch.cur_context].world_msrs[i] = vcpu->arch.guest_msrs[i]; + } } static void load_world_ctx(struct acrn_vcpu *vcpu, const struct ext_context *ext_ctx) { + uint32_t i; + /* mark to update on-demand run_context for efer/rflags/rsp */ bitmap_set_lock(CPU_REG_EFER, &vcpu->reg_updated); bitmap_set_lock(CPU_REG_RFLAGS, &vcpu->reg_updated); @@ -254,6 +263,11 @@ static void load_world_ctx(struct acrn_vcpu *vcpu, const struct ext_context *ext /* FX area */ rstor_fxstore_guest_area(ext_ctx); + + /* For MSRs need isolation between worlds */ + for (i = 0U; i < NUM_WORLD_MSRS; i++) { + vcpu->arch.guest_msrs[i] = vcpu->arch.contexts[!vcpu->arch.cur_context].world_msrs[i]; + } } static void copy_smc_param(const struct run_context *prev_ctx, @@ -404,6 +418,8 @@ static bool init_secure_world_env(struct acrn_vcpu *vcpu, uint64_t base_hpa, uint32_t size) { + uint32_t i; + vcpu->arch.inst_len = 0U; vcpu->arch.contexts[SECURE_WORLD].run_ctx.rip = entry_gpa; vcpu->arch.contexts[SECURE_WORLD].run_ctx.guest_cpu_regs.regs.rsp = @@ -413,6 +429,12 @@ static bool init_secure_world_env(struct acrn_vcpu *vcpu, vcpu->arch.contexts[SECURE_WORLD].ext_ctx.ia32_pat = vcpu->arch.contexts[NORMAL_WORLD].ext_ctx.ia32_pat; + /* Init per world MSRs */ + for (i = 0U; i < NUM_WORLD_MSRS; i++) { + vcpu->arch.contexts[NORMAL_WORLD].world_msrs[i] = vcpu->arch.guest_msrs[i]; + vcpu->arch.contexts[SECURE_WORLD].world_msrs[i] = vcpu->arch.guest_msrs[i]; + } + return setup_trusty_info(vcpu, size, base_hpa); } diff --git a/hypervisor/include/arch/x86/guest/guest.h b/hypervisor/include/arch/x86/guest/guest.h index 8104ad928..42b0bade1 100644 --- a/hypervisor/include/arch/x86/guest/guest.h +++ b/hypervisor/include/arch/x86/guest/guest.h @@ -28,15 +28,6 @@ (idx)++, vcpu = &(vm->hw.vcpu_array[(idx)])) \ if (vcpu->state != 
VCPU_OFFLINE) -/* the index is matched with emulated msrs array*/ -#define IDX_TSC_DEADLINE 0U -#define IDX_BIOS_UPDT_TRIG (IDX_TSC_DEADLINE + 1U) -#define IDX_BIOS_SIGN_ID (IDX_BIOS_UPDT_TRIG + 1U) -#define IDX_TSC (IDX_BIOS_SIGN_ID + 1U) -#define IDX_PAT (IDX_TSC + 1U) -#define IDX_APIC_BASE (IDX_PAT + 1U) -#define IDX_MAX_MSR (IDX_APIC_BASE + 1U) - /* * VCPU related APIs */ @@ -139,6 +130,8 @@ int rdmsr_vmexit_handler(struct acrn_vcpu *vcpu); int wrmsr_vmexit_handler(struct acrn_vcpu *vcpu); void init_msr_emulation(struct acrn_vcpu *vcpu); +uint32_t vmsr_get_guest_msr_index(uint32_t msr); + struct run_context; int vmx_vmrun(struct run_context *context, int ops, int ibrs); diff --git a/hypervisor/include/arch/x86/guest/vcpu.h b/hypervisor/include/arch/x86/guest/vcpu.h index 9bb3cbcb7..baa8604b0 100644 --- a/hypervisor/include/arch/x86/guest/vcpu.h +++ b/hypervisor/include/arch/x86/guest/vcpu.h @@ -268,7 +268,6 @@ struct acrn_vcpu { struct io_request req; /* used by io/ept emulation */ - uint64_t guest_msrs[IDX_MAX_MSR]; #ifdef CONFIG_MTRR_ENABLED struct mtrr_state mtrr; #endif /* CONFIG_MTRR_ENABLED */ @@ -474,6 +473,30 @@ void vcpu_set_cr4(struct acrn_vcpu *vcpu, uint64_t val); uint64_t vcpu_get_pat_ext(const struct acrn_vcpu *vcpu); void vcpu_set_pat_ext(struct acrn_vcpu *vcpu, uint64_t val); +/** + * @brief get guest emulated MSR + * + * Get the content of emulated guest MSR + * + * @param[in] vcpu pointer to vcpu data structure + * @param[in] msr the guest MSR + * + * @return the value of emulated MSR. + */ +uint64_t vcpu_get_guest_msr(const struct acrn_vcpu *vcpu, uint32_t msr); + +/** + * @brief set guest emulated MSR + * + * Update the content of emulated guest MSR + * + * @param[in] vcpu pointer to vcpu data structure + * @param[in] msr the guest MSR + * @param[in] val the value to set the target MSR + * + */ +void vcpu_set_guest_msr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t val); + /** * @brief set all the vcpu registers *