diff --git a/hypervisor/arch/x86/guest/trusty.c b/hypervisor/arch/x86/guest/trusty.c
index 35d9fe5bf..7432001c5 100644
--- a/hypervisor/arch/x86/guest/trusty.c
+++ b/hypervisor/arch/x86/guest/trusty.c
@@ -143,17 +143,6 @@ void destroy_secure_world(struct acrn_vm *vm, bool need_clr_mem)
 	}
 }
 
-static inline void save_fxstore_guest_area(struct ext_context *ext_ctx)
-{
-	asm volatile("fxsave (%0)"
-			: : "r" (ext_ctx->fxstore_guest_area) : "memory");
-}
-
-static inline void rstor_fxstore_guest_area(const struct ext_context *ext_ctx)
-{
-	asm volatile("fxrstor (%0)" : : "r" (ext_ctx->fxstore_guest_area));
-}
-
 static void save_world_ctx(struct acrn_vcpu *vcpu, struct ext_context *ext_ctx)
 {
 	uint32_t i;
diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c
index bc7157854..d8cd3012a 100644
--- a/hypervisor/arch/x86/guest/vcpu.c
+++ b/hypervisor/arch/x86/guest/vcpu.c
@@ -712,29 +712,42 @@ void resume_vcpu(struct acrn_vcpu *vcpu)
 	release_schedule_lock(pcpu_id);
 }
 
+/* TODO:
+ * Now we have switch_out and switch_in callbacks for each thread_object, and the
+ * scheduler calls them on every thread switch. We could implement lazy context
+ * switch, which only performs a context switch when it is really needed.
+ */
 static void context_switch_out(struct thread_object *prev)
 {
 	struct acrn_vcpu *vcpu = list_entry(prev, struct acrn_vcpu, thread_obj);
+	struct ext_context *ectx = &(vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx);
+
+	/* We don't flush the TLB as we assume each vcpu has a different vpid */
+	ectx->ia32_star = msr_read(MSR_IA32_STAR);
+	ectx->ia32_lstar = msr_read(MSR_IA32_LSTAR);
+	ectx->ia32_fmask = msr_read(MSR_IA32_FMASK);
+	ectx->ia32_kernel_gs_base = msr_read(MSR_IA32_KERNEL_GS_BASE);
+
+	save_fxstore_guest_area(ectx);
 
 	vcpu->running = false;
-	/* do prev vcpu context switch out */
-	/* For now, we don't need to invalid ept.
-	 * But if we have more than one vcpu on one pcpu,
-	 * we need add ept invalid operation here.
-	 */
 }
 
 static void context_switch_in(struct thread_object *next)
 {
 	struct acrn_vcpu *vcpu = list_entry(next, struct acrn_vcpu, thread_obj);
+	struct ext_context *ectx = &(vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx);
+
+	switch_vmcs(vcpu);
+
+	msr_write(MSR_IA32_STAR, ectx->ia32_star);
+	msr_write(MSR_IA32_LSTAR, ectx->ia32_lstar);
+	msr_write(MSR_IA32_FMASK, ectx->ia32_fmask);
+	msr_write(MSR_IA32_KERNEL_GS_BASE, ectx->ia32_kernel_gs_base);
+
+	rstor_fxstore_guest_area(ectx);
 
 	vcpu->running = true;
-	/* FIXME:
-	 * Now, we don't need to load new vcpu VMCS because
-	 * we only do switch between vcpu loop and idle loop.
-	 * If we have more than one vcpu on on pcpu, need to
-	 * add VMCS load operation here.
-	 */
 }
 
 void schedule_vcpu(struct acrn_vcpu *vcpu)
diff --git a/hypervisor/arch/x86/guest/vmcs.c b/hypervisor/arch/x86/guest/vmcs.c
index 9c40df056..2d87a5c15 100644
--- a/hypervisor/arch/x86/guest/vmcs.c
+++ b/hypervisor/arch/x86/guest/vmcs.c
@@ -509,9 +509,9 @@ static void do_init_vmcs(struct acrn_vcpu *vcpu)
 	vmx_rev_id = msr_read(MSR_IA32_VMX_BASIC);
 	(void)memcpy_s(vcpu->arch.vmcs, 4U, (void *)&vmx_rev_id, 4U);
 
-	/* Execute VMCLEAR on previous un-clear VMCS */
-	if (*vmcs_ptr != NULL) {
-		vmcs_pa = hva2hpa(*vmcs_ptr);
+	/* Execute VMCLEAR on the VMCS of this vcpu */
+	if ((void *)vcpu->arch.vmcs != NULL) {
+		vmcs_pa = hva2hpa(vcpu->arch.vmcs);
 		exec_vmclear((void *)&vmcs_pa);
 	}
 
@@ -543,6 +543,21 @@ void init_vmcs(struct acrn_vcpu *vcpu)
 	}
 }
 
+/**
+ * @pre vcpu != NULL
+ */
+void switch_vmcs(const struct acrn_vcpu *vcpu)
+{
+	uint64_t vmcs_pa;
+	void **vmcs_ptr = &get_cpu_var(vmcs_run);
+
+	if (vcpu->launched && (*vmcs_ptr != (void *)vcpu->arch.vmcs)) {
+		vmcs_pa = hva2hpa(vcpu->arch.vmcs);
+		exec_vmptrld((void *)&vmcs_pa);
+		*vmcs_ptr = (void *)vcpu->arch.vmcs;
+	}
+}
+
 void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu)
 {
 	uint32_t value32;
diff --git a/hypervisor/include/arch/x86/guest/vcpu.h b/hypervisor/include/arch/x86/guest/vcpu.h
index 97473c574..4b9ec2d15 100644
--- a/hypervisor/include/arch/x86/guest/vcpu.h
+++ b/hypervisor/include/arch/x86/guest/vcpu.h
@@ -538,6 +538,17 @@ static inline bool is_pae(struct acrn_vcpu *vcpu)
 	return (vcpu_get_cr4(vcpu) & CR4_PAE) != 0UL;
 }
 
+static inline void save_fxstore_guest_area(struct ext_context *ext_ctx)
+{
+	asm volatile("fxsave (%0)"
+			: : "r" (ext_ctx->fxstore_guest_area) : "memory");
+}
+
+static inline void rstor_fxstore_guest_area(const struct ext_context *ext_ctx)
+{
+	asm volatile("fxrstor (%0)" : : "r" (ext_ctx->fxstore_guest_area));
+}
+
 struct acrn_vcpu *get_running_vcpu(uint16_t pcpu_id);
 struct acrn_vcpu* get_ever_run_vcpu(uint16_t pcpu_id);
 
diff --git a/hypervisor/include/arch/x86/guest/vmcs.h b/hypervisor/include/arch/x86/guest/vmcs.h
index 53e5afd43..5bb94951a 100644
--- a/hypervisor/include/arch/x86/guest/vmcs.h
+++ b/hypervisor/include/arch/x86/guest/vmcs.h
@@ -41,6 +41,7 @@ static inline uint64_t apic_access_offset(uint64_t qual)
 	return (qual & APIC_ACCESS_OFFSET);
 }
 void init_vmcs(struct acrn_vcpu *vcpu);
+void switch_vmcs(const struct acrn_vcpu *vcpu);
 void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu);
 
 #endif /* ASSEMBLER */
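
Note (illustration only, not part of the patch): the new switch_vmcs() keeps a per-pCPU cache of the last-loaded VMCS (get_cpu_var(vmcs_run) in the hunk above) and issues VMPTRLD only when the incoming vCPU has already been launched and its VMCS differs from the cached one. The stand-alone sketch below shows that caching pattern under simplified assumptions; fake_vcpu, fake_vmptrld and current_vmcs are hypothetical stand-ins for the real VMX plumbing.

/*
 * Sketch of the lazy-VMPTRLD pattern: reload the VMCS only when the
 * incoming vCPU's VMCS is not the one currently loaded on this pCPU.
 */
#include <stdio.h>
#include <stdbool.h>

struct fake_vcpu {
	const char *name;
	bool launched;
	void *vmcs;		/* host-virtual address of this vCPU's VMCS region */
};

static void *current_vmcs;	/* per-pCPU cache; a real hypervisor keeps one per CPU */

static void fake_vmptrld(void *vmcs)
{
	/* stand-in for the real VMPTRLD instruction */
	printf("VMPTRLD %p\n", vmcs);
}

static void switch_vmcs_sketch(const struct fake_vcpu *vcpu)
{
	/* Only reload when the vCPU has been launched and its VMCS is not
	 * already current on this pCPU. */
	if (vcpu->launched && (current_vmcs != vcpu->vmcs)) {
		fake_vmptrld(vcpu->vmcs);
		current_vmcs = vcpu->vmcs;
	}
}

int main(void)
{
	static char vmcs_a[4096], vmcs_b[4096];
	struct fake_vcpu a = { "vcpu-a", true, vmcs_a };
	struct fake_vcpu b = { "vcpu-b", true, vmcs_b };

	switch_vmcs_sketch(&a);	/* loads vmcs_a */
	switch_vmcs_sketch(&a);	/* no-op: already current */
	switch_vmcs_sketch(&b);	/* loads vmcs_b */
	return 0;
}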
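A second illustration, covering the save_fxstore_guest_area()/rstor_fxstore_guest_area() helpers moved into vcpu.h: FXSAVE dumps the x87/SSE register state into a 512-byte save area that must be 16-byte aligned, and FXRSTOR loads it back, which is what context_switch_out()/context_switch_in() now do around each thread switch. The names fx_area, fx_save and fx_restore below are hypothetical; only the instruction usage mirrors the patch. Builds with gcc or clang on x86-64.

/*
 * Minimal stand-alone FXSAVE/FXRSTOR sketch (not ACRN code).
 */
#include <stdint.h>
#include <stdio.h>

struct fx_area {
	uint8_t bytes[512];		/* FXSAVE writes exactly 512 bytes */
} __attribute__((aligned(16)));		/* FXSAVE/FXRSTOR fault on unaligned operands */

static inline void fx_save(struct fx_area *area)
{
	__asm__ volatile("fxsave (%0)" : : "r"(area->bytes) : "memory");
}

static inline void fx_restore(const struct fx_area *area)
{
	__asm__ volatile("fxrstor (%0)" : : "r"(area->bytes));
}

int main(void)
{
	static struct fx_area area;

	fx_save(&area);		/* snapshot current x87/SSE state, as on switch-out */
	fx_restore(&area);	/* load it back, as on switch-in */

	/* Bytes 0..1 of the FXSAVE image hold the x87 control word. */
	printf("FCW = 0x%02x%02x\n", area.bytes[1], area.bytes[0]);
	return 0;
}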