hv: sched: support vcpu context switch on one pcpu

To support cpu sharing, multiple vcpus can run on the same pcpu, so the
necessary vcpu context switch has to be performed. This patch adds the
actions below to the context switch (a rough scheduler sketch follows
the list):
  1) fxsave/fxrstor;
  2) save/restore MSRs: MSR_IA32_STAR, MSR_IA32_LSTAR,
	MSR_IA32_FMASK, MSR_IA32_KERNEL_GS_BASE;
  3) switch vmcs.
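
These actions hang off the switch_out/switch_in callbacks of each thread_object
(see the diff below). As orientation only, here is a minimal sketch of how a
scheduler core could drive such callbacks; do_switch() and its call site are
assumptions, not the actual ACRN scheduler code.

static void do_switch(struct thread_object *prev, struct thread_object *next)
{
	/* Sketch: only the switch_out/switch_in callback shape comes from
	 * this patch; everything else here is assumed. */
	if ((prev != NULL) && (prev->switch_out != NULL)) {
		prev->switch_out(prev);		/* save MSRs/FX state, mark vcpu not running */
	}

	/* the host stack/register switch of the underlying thread is assumed here */

	if ((next != NULL) && (next->switch_in != NULL)) {
		next->switch_in(next);		/* load VMCS, restore MSRs/FX state */
	}
}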

Tracked-On: #3813
Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
Signed-off-by: Yu Wang <yu1.wang@intel.com>
Signed-off-by: Shuo A Liu <shuo.a.liu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
commit dadcdcefa0 (parent 7e66c0d4fa)
Author: Shuo A Liu, 2019-06-12 15:56:25 +08:00; committed by ACRN System Integration
5 files changed, 54 insertions(+), 25 deletions(-)


@@ -143,17 +143,6 @@ void destroy_secure_world(struct acrn_vm *vm, bool need_clr_mem)
}
}
static inline void save_fxstore_guest_area(struct ext_context *ext_ctx)
{
	asm volatile("fxsave (%0)"
			: : "r" (ext_ctx->fxstore_guest_area) : "memory");
}

static inline void rstor_fxstore_guest_area(const struct ext_context *ext_ctx)
{
	asm volatile("fxrstor (%0)" : : "r" (ext_ctx->fxstore_guest_area));
}

static void save_world_ctx(struct acrn_vcpu *vcpu, struct ext_context *ext_ctx)
{
	uint32_t i;


@@ -712,29 +712,42 @@ void resume_vcpu(struct acrn_vcpu *vcpu)
	release_schedule_lock(pcpu_id);
}

/* TODO:
 * Now we have switch_out and switch_in callbacks for each thread_object, and schedule
 * will call them on every thread switch. We can implement lazy context switch, which
 * only does a context switch when really needed.
 */
static void context_switch_out(struct thread_object *prev)
{
	struct acrn_vcpu *vcpu = list_entry(prev, struct acrn_vcpu, thread_obj);
	struct ext_context *ectx = &(vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx);

	/* We don't flush TLB as we assume each vcpu has a different vpid */
	ectx->ia32_star = msr_read(MSR_IA32_STAR);
	ectx->ia32_lstar = msr_read(MSR_IA32_LSTAR);
	ectx->ia32_fmask = msr_read(MSR_IA32_FMASK);
	ectx->ia32_kernel_gs_base = msr_read(MSR_IA32_KERNEL_GS_BASE);

	save_fxstore_guest_area(ectx);

	vcpu->running = false;
	/* do prev vcpu context switch out */
	/* For now, we don't need to invalidate ept.
	 * But if we have more than one vcpu on one pcpu,
	 * we need to add an ept invalidation operation here.
	 */
}

static void context_switch_in(struct thread_object *next)
{
	struct acrn_vcpu *vcpu = list_entry(next, struct acrn_vcpu, thread_obj);
	struct ext_context *ectx = &(vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx);

	switch_vmcs(vcpu);

	msr_write(MSR_IA32_STAR, ectx->ia32_star);
	msr_write(MSR_IA32_LSTAR, ectx->ia32_lstar);
	msr_write(MSR_IA32_FMASK, ectx->ia32_fmask);
	msr_write(MSR_IA32_KERNEL_GS_BASE, ectx->ia32_kernel_gs_base);

	rstor_fxstore_guest_area(ectx);

	vcpu->running = true;
	/* FIXME:
	 * Now, we don't need to load the new vcpu VMCS because
	 * we only switch between the vcpu loop and the idle loop.
	 * If we have more than one vcpu on one pcpu, we need to
	 * add a VMCS load operation here.
	 */
}
void schedule_vcpu(struct acrn_vcpu *vcpu)
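
For context, these callbacks are expected to be installed on the vcpu's thread_obj
when the vcpu is set up for scheduling; that wiring is not part of this hunk, so the
sketch below is an assumption in everything except the callback names.

static void sketch_wire_vcpu_thread(struct acrn_vcpu *vcpu)
{
	/* Sketch only: where ACRN actually assigns these is not shown in this diff. */
	vcpu->thread_obj.switch_out = context_switch_out;
	vcpu->thread_obj.switch_in = context_switch_in;
}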


@@ -509,9 +509,9 @@ static void do_init_vmcs(struct acrn_vcpu *vcpu)
	vmx_rev_id = msr_read(MSR_IA32_VMX_BASIC);
	(void)memcpy_s(vcpu->arch.vmcs, 4U, (void *)&vmx_rev_id, 4U);
	/* Execute VMCLEAR on previous un-clear VMCS */
	if (*vmcs_ptr != NULL) {
		vmcs_pa = hva2hpa(*vmcs_ptr);
	/* Execute VMCLEAR VMCS of this vcpu */
	if ((void *)vcpu->arch.vmcs != NULL) {
		vmcs_pa = hva2hpa(vcpu->arch.vmcs);
		exec_vmclear((void *)&vmcs_pa);
	}
@@ -543,6 +543,21 @@ void init_vmcs(struct acrn_vcpu *vcpu)
}
}
/**
 * @pre vcpu != NULL
 */
void switch_vmcs(const struct acrn_vcpu *vcpu)
{
	uint64_t vmcs_pa;
	void **vmcs_ptr = &get_cpu_var(vmcs_run);

	if (vcpu->launched && (*vmcs_ptr != (void *)vcpu->arch.vmcs)) {
		vmcs_pa = hva2hpa(vcpu->arch.vmcs);
		exec_vmptrld((void *)&vmcs_pa);
		*vmcs_ptr = (void *)vcpu->arch.vmcs;
	}
}

void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu)
{
	uint32_t value32;
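
switch_vmcs() above performs a lazy VMPTRLD, keyed off vcpu->launched and the
per-pcpu vmcs_run pointer. A hedged sketch of how a caller might choose between
the two entry points follows; launch_vcpu_sketch() is hypothetical, only
init_vmcs()/switch_vmcs() come from this diff.

static void launch_vcpu_sketch(struct acrn_vcpu *vcpu)
{
	/* Sketch: not the actual ACRN launch path. */
	if (!vcpu->launched) {
		init_vmcs(vcpu);	/* VMCLEAR + initial VMCS setup (per the hunk above) */
	} else {
		switch_vmcs(vcpu);	/* VMPTRLD only if another VMCS is current on this pcpu */
	}
}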


@@ -538,6 +538,17 @@ static inline bool is_pae(struct acrn_vcpu *vcpu)
	return (vcpu_get_cr4(vcpu) & CR4_PAE) != 0UL;
}

static inline void save_fxstore_guest_area(struct ext_context *ext_ctx)
{
	asm volatile("fxsave (%0)"
			: : "r" (ext_ctx->fxstore_guest_area) : "memory");
}

static inline void rstor_fxstore_guest_area(const struct ext_context *ext_ctx)
{
	asm volatile("fxrstor (%0)" : : "r" (ext_ctx->fxstore_guest_area));
}
struct acrn_vcpu *get_running_vcpu(uint16_t pcpu_id);
struct acrn_vcpu* get_ever_run_vcpu(uint16_t pcpu_id);
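
fxsave/fxrstor architecturally require a 512-byte save area aligned to a 16-byte
boundary, so fxstore_guest_area in struct ext_context has to satisfy that. The
declaration below is a sketch under that assumption; the macro name and exact
field type are not taken from ACRN.

#define FXSTORE_AREA_SIZE	512U	/* FXSAVE area size per the SDM */

struct ext_context_fx_sketch {
	/* 16-byte alignment is required by fxsave/fxrstor */
	uint8_t fxstore_guest_area[FXSTORE_AREA_SIZE] __attribute__((aligned(16)));
};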


@@ -41,6 +41,7 @@ static inline uint64_t apic_access_offset(uint64_t qual)
	return (qual & APIC_ACCESS_OFFSET);
}
void init_vmcs(struct acrn_vcpu *vcpu);
void switch_vmcs(const struct acrn_vcpu *vcpu);
void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu);
#endif /* ASSEMBLER */