modularization: vmx on/off should not use vcpu param

vmx.c should only take care of host VMX operations and should not
contain vcpu references, so refine the vmx on/off APIs and remove the
use of vcpu by adding a per-cpu vmcs_run pointer.

Since each pcpu currently runs only one vcpu, keeping the running VMCS
in the per-cpu vmcs_run pointer is sufficient.

Changes to be committed:
	modified:   arch/x86/cpu.c
	modified:   arch/x86/init.c
	modified:   arch/x86/pm.c
	modified:   arch/x86/vmcs.c
	modified:   arch/x86/vmx.c
	modified:   include/arch/x86/per_cpu.h
	modified:   include/arch/x86/vmx.h

Tracked-On: #1842
Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Jason Chen CJ 2018-12-12 20:48:57 +08:00 committed by wenlingz
parent bed82dd3f8
commit 36863a0b54
7 changed files with 37 additions and 32 deletions

View File

@ -607,7 +607,7 @@ void cpu_dead(void)
if (bitmap_test_and_clear_lock(pcpu_id, &pcpu_active_bitmap)) {
/* clean up native stuff */
vmx_off(pcpu_id);
vmx_off();
cache_flush_invalidate_all();
/* Set state to show CPU is dead */

View File

@ -59,7 +59,7 @@ static void init_guest(void)
/*TODO: move into guest-vcpu module */
static void enter_guest_mode(uint16_t pcpu_id)
{
exec_vmxon_instr(pcpu_id);
vmx_on();
#ifdef CONFIG_PARTITION_MODE
(void)prepare_vm(pcpu_id);

View File

@ -124,15 +124,12 @@ void enter_s3(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
{
uint64_t pmain_entry_saved;
uint32_t guest_wakeup_vec32;
uint16_t pcpu_id;
/* We assume enter s3 success by default */
host_enter_s3_success = 1U;
if (vm->pm.sx_state_data != NULL) {
pause_vm(vm); /* pause vm0 before suspend system */
pcpu_id = get_cpu_id();
stac();
/* Save the wakeup vec set by guest. Will return to guest
* with this wakeup vec as entry.
@ -159,7 +156,7 @@ void enter_s3(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
clac();
CPU_IRQ_DISABLE();
vmx_off(pcpu_id);
vmx_off();
suspend_console();
suspend_ioapic();
@ -173,7 +170,7 @@ void enter_s3(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
resume_ioapic();
resume_console();
exec_vmxon_instr(pcpu_id);
vmx_on();
CPU_IRQ_ENABLE();
/* restore the default main entry */

View File

@ -873,6 +873,7 @@ void init_vmcs(struct acrn_vcpu *vcpu)
{
uint64_t vmx_rev_id;
uint64_t vmcs_pa;
void **vmcs_ptr = &get_cpu_var(vmcs_run);
/* Log message */
pr_dbg("Initializing VMCS");
@ -881,12 +882,16 @@ void init_vmcs(struct acrn_vcpu *vcpu)
vmx_rev_id = msr_read(MSR_IA32_VMX_BASIC);
(void)memcpy_s(vcpu->arch.vmcs, 4U, (void *)&vmx_rev_id, 4U);
/* Execute VMCLEAR on current VMCS */
vmcs_pa = hva2hpa(vcpu->arch.vmcs);
exec_vmclear((void *)&vmcs_pa);
/* Execute VMCLEAR on previous un-clear VMCS */
if (*vmcs_ptr != NULL) {
vmcs_pa = hva2hpa(*vmcs_ptr);
exec_vmclear((void *)&vmcs_pa);
}
/* Load VMCS pointer */
vmcs_pa = hva2hpa(vcpu->arch.vmcs);
exec_vmptrld((void *)&vmcs_pa);
*vmcs_ptr = (void *)vcpu->arch.vmcs;
/* Initialize the Virtual Machine Control Structure (VMCS) */
init_host_state();

View File

@ -32,14 +32,14 @@ static inline void exec_vmxon(void *addr)
/* Per cpu data to hold the vmxon_region for each pcpu.
* It will be used again when we start a pcpu after the pcpu was down.
* S3 enter/exit will use it.
* Only run on current pcpu.
*/
void exec_vmxon_instr(uint16_t pcpu_id)
void vmx_on(void)
{
uint64_t tmp64, vmcs_pa;
uint64_t tmp64;
uint32_t tmp32;
void *vmxon_region_va = (void *)per_cpu(vmxon_region, pcpu_id);
void *vmxon_region_va = (void *)get_cpu_var(vmxon_region);
uint64_t vmxon_region_pa;
struct acrn_vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
/* Initialize vmxon page with revision id from IA32 VMX BASIC MSR */
tmp32 = (uint32_t)msr_read(MSR_IA32_VMX_BASIC);
@ -65,9 +65,6 @@ void exec_vmxon_instr(uint16_t pcpu_id)
/* Turn ON VMX */
vmxon_region_pa = hva2hpa(vmxon_region_va);
exec_vmxon(&vmxon_region_pa);
vmcs_pa = hva2hpa(vcpu->arch.vmcs);
exec_vmptrld(&vmcs_pa);
}
static inline void exec_vmxoff(void)
@ -75,18 +72,6 @@ static inline void exec_vmxoff(void)
asm volatile ("vmxoff" : : : "memory");
}
void vmx_off(uint16_t pcpu_id)
{
struct acrn_vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
uint64_t vmcs_pa;
vmcs_pa = hva2hpa(vcpu->arch.vmcs);
exec_vmclear((void *)&vmcs_pa);
exec_vmxoff();
}
/**
* @pre addr != NULL && addr is 4KB-aligned
* @pre addr != VMXON pointer
@ -122,6 +107,24 @@ void exec_vmptrld(void *addr)
: "cc", "memory");
}
/**
* only run on current pcpu
*/
void vmx_off(void)
{
void **vmcs_ptr = &get_cpu_var(vmcs_run);
if (*vmcs_ptr != NULL) {
uint64_t vmcs_pa;
vmcs_pa = hva2hpa(*vmcs_ptr);
exec_vmclear((void *)&vmcs_pa);
*vmcs_ptr = NULL;
}
exec_vmxoff();
}
uint64_t exec_vmread64(uint32_t field_full)
{
uint64_t value;

View File

@ -22,6 +22,7 @@
struct per_cpu_region {
/* vmxon_region MUST be 4KB-aligned */
uint8_t vmxon_region[PAGE_SIZE];
void *vmcs_run;
#ifdef HV_DEBUG
uint64_t *sbuf[ACRN_SBUF_ID_MAX];
char logbuf[LOG_MESSAGE_MAX_SIZE];

View File

@ -378,9 +378,8 @@
#define VMX_INT_TYPE_SW_EXP 6U
/* External Interfaces */
void exec_vmxon_instr(uint16_t pcpu_id);
void vmx_off(uint16_t pcpu_id);
void vmx_on(void);
void vmx_off(void);
/**
* Read field from VMCS.