hv: define 4 vcpu modes

move enum vm_cpu_mode to guest.h
move enum vm_paging_mode to guest.h
replace REAL_MODE with CPU_MODE_REAL
replace PAGE_PROTECTED_MODE with CPU_MODE_64BIT

Signed-off-by: Binbin Wu <binbin.wu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Author: Binbin Wu, 2018-05-15 11:28:16 +08:00 (committed by lijinxia)
Parent: cb262286c6
Commit: 744e09bc7e
7 changed files with 38 additions and 42 deletions
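For reference, the four vm_cpu_mode values map onto the architectural operating modes, which are determined by CR0.PE, IA32_EFER.LMA and the CS descriptor L bit. The sketch below is illustrative only, not ACRN code: classify_cpu_mode and the bit masks are made-up names, and virtual-8086 mode is ignored for brevity.

#include <stdint.h>

#define CR0_PE_BIT	(1UL << 0)	/* CR0.PE: protection enable */
#define EFER_LMA_BIT	(1UL << 10)	/* IA32_EFER.LMA: long mode active */

enum vm_cpu_mode {
	CPU_MODE_REAL,
	CPU_MODE_PROTECTED,
	CPU_MODE_COMPATIBILITY,	/* IA-32E mode (CS.L = 0) */
	CPU_MODE_64BIT,		/* IA-32E mode (CS.L = 1) */
};

/* Classify the guest operating mode from raw register values. */
static enum vm_cpu_mode classify_cpu_mode(uint64_t cr0, uint64_t efer, int cs_l)
{
	if ((cr0 & CR0_PE_BIT) == 0)
		return CPU_MODE_REAL;		/* protection disabled */
	if ((efer & EFER_LMA_BIT) == 0)
		return CPU_MODE_PROTECTED;	/* protected mode, long mode inactive */
	return (cs_l != 0) ? CPU_MODE_64BIT : CPU_MODE_COMPATIBILITY;
}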

@@ -124,20 +124,6 @@ struct seg_desc {
 #define SEG_DESC_GRANULARITY(access)	(((access) & 0x8000) ? 1 : 0)
 #define SEG_DESC_UNUSABLE(access)	(((access) & 0x10000) ? 1 : 0)
 
-enum vm_cpu_mode {
-	CPU_MODE_REAL,
-	CPU_MODE_PROTECTED,
-	CPU_MODE_COMPATIBILITY,		/* IA-32E mode (CS.L = 0) */
-	CPU_MODE_64BIT,			/* IA-32E mode (CS.L = 1) */
-};
-
-enum vm_paging_mode {
-	PAGING_MODE_FLAT,
-	PAGING_MODE_32,
-	PAGING_MODE_PAE,
-	PAGING_MODE_64,
-};
-
 struct vm_guest_paging {
 	uint64_t	cr3;
 	int		cpl;

@@ -265,9 +265,9 @@ void reset_vcpu(struct vcpu *vcpu)
 void init_vcpu(struct vcpu *vcpu)
 {
 	if (is_vcpu_bsp(vcpu))
-		vcpu->arch_vcpu.cpu_mode = PAGE_PROTECTED_MODE;
+		vcpu->arch_vcpu.cpu_mode = CPU_MODE_64BIT;
 	else
-		vcpu->arch_vcpu.cpu_mode = REAL_MODE;
+		vcpu->arch_vcpu.cpu_mode = CPU_MODE_REAL;
 
 	/* init_vmcs is delayed to vcpu vmcs launch first time */
 }
@@ -336,9 +336,9 @@ int prepare_vcpu(struct vm *vm, int pcpu_id)
 		if (!vm_sw_loader)
 			vm_sw_loader = general_sw_loader;
 
 		vm_sw_loader(vm, vcpu);
-		vcpu->arch_vcpu.cpu_mode = PAGE_PROTECTED_MODE;
+		vcpu->arch_vcpu.cpu_mode = CPU_MODE_64BIT;
 	} else {
-		vcpu->arch_vcpu.cpu_mode = REAL_MODE;
+		vcpu->arch_vcpu.cpu_mode = CPU_MODE_REAL;
 	}
 
 	/* init_vmcs is delayed to vcpu vmcs launch first time */

@@ -1065,7 +1065,7 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic)
 			if (--target_vcpu->arch_vcpu.nr_sipi > 0)
 				return 0;
 
-			target_vcpu->arch_vcpu.cpu_mode = REAL_MODE;
+			target_vcpu->arch_vcpu.cpu_mode = CPU_MODE_REAL;
 			target_vcpu->arch_vcpu.sipi_vector = vec;
 			pr_err("Start Secondary VCPU%d for VM[%d]...",
 					target_vcpu->vcpu_id,

@@ -236,7 +236,7 @@ static int write_cr0(struct vcpu *vcpu, uint64_t value)
 	 * transition from real mode to paged-protected mode
 	 */
 	if (!is_vcpu_bsp(vcpu) &&
-		(vcpu->arch_vcpu.cpu_mode == REAL_MODE) &&
+		(vcpu->arch_vcpu.cpu_mode == CPU_MODE_REAL) &&
 		(value & CR0_PG) && (value & CR0_PE)) {
 		/* Enable protected mode */
 		value32 = exec_vmread(VMX_ENTRY_CONTROLS);

@@ -264,12 +264,12 @@ static void init_guest_state(struct vcpu *vcpu)
 	/* Setup guest control register values */
 	/* Set up guest CRO field */
-	if (get_vcpu_mode(vcpu) == REAL_MODE) {
+	if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
 		/*cur_context->cr0 = (CR0_CD | CR0_NW | CR0_ET | CR0_NE);*/
 		cur_context->cr0 = CR0_ET | CR0_NE;
 		cur_context->cr3 = 0;
 		cur_context->cr4 = CR4_VMXE;
-	} else if (get_vcpu_mode(vcpu) == PAGE_PROTECTED_MODE) {
+	} else if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
 		cur_context->cr0 = ((uint64_t)CR0_PG | CR0_PE | CR0_NE);
 		cur_context->cr4 = ((uint64_t)CR4_PSE | CR4_PAE | CR4_MCE | CR4_VMXE);
 		cur_context->cr3 = vm->arch_vm.guest_init_pml4 | CR3_PWT;
@@ -303,7 +303,7 @@ static void init_guest_state(struct vcpu *vcpu)
 	/***************************************************/
 	/* Set Code Segment - CS */
 	/***************************************************/
-	if (get_vcpu_mode(vcpu) == REAL_MODE) {
+	if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
 		/* AP is initialized with real mode
 		 * and CS value is left shift 8 bits from sipi vector;
 		 */
@@ -343,7 +343,7 @@ static void init_guest_state(struct vcpu *vcpu)
 	/***************************************************/
 	/* Set up guest instruction pointer */
 	field = VMX_GUEST_RIP;
-	if (get_vcpu_mode(vcpu) == REAL_MODE)
+	if (get_vcpu_mode(vcpu) == CPU_MODE_REAL)
 		value32 = 0;
 	else
 		value32 = (uint32_t) ((uint64_t) vcpu->entry_addr & 0xFFFFFFFF);
@@ -351,7 +351,7 @@ static void init_guest_state(struct vcpu *vcpu)
 	pr_dbg("GUEST RIP on VMEntry %x ", value32);
 	exec_vmwrite(field, value32);
 
-	if (get_vcpu_mode(vcpu) == PAGE_PROTECTED_MODE) {
+	if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
 		/* Set up guest stack pointer to 0 */
 		field = VMX_GUEST_RSP;
 		value32 = 0;
@@ -365,13 +365,13 @@ static void init_guest_state(struct vcpu *vcpu)
 	/***************************************************/
 	/* GDTR - Global Descriptor Table */
-	if (get_vcpu_mode(vcpu) == REAL_MODE) {
+	if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
 		/* Base */
 		base = 0;
 
 		/* Limit */
 		limit = 0xFFFF;
-	} else if (get_vcpu_mode(vcpu) == PAGE_PROTECTED_MODE) {
+	} else if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
 		descriptor_table gdtb = {0, 0};
 
 		/* Base *//* TODO: Should guest GDTB point to host GDTB ? */
@@ -400,13 +400,13 @@ static void init_guest_state(struct vcpu *vcpu)
 	pr_dbg("VMX_GUEST_GDTR_LIMIT: 0x%x ", limit);
 
 	/* IDTR - Interrupt Descriptor Table */
-	if (get_vcpu_mode(vcpu) == REAL_MODE) {
+	if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
 		/* Base */
 		base = 0;
 
 		/* Limit */
 		limit = 0xFFFF;
-	} else if (get_vcpu_mode(vcpu) == PAGE_PROTECTED_MODE) {
+	} else if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
 		descriptor_table idtb = {0, 0};
 
 		/* TODO: Should guest IDTR point to host IDTR ? */
@@ -444,11 +444,11 @@ static void init_guest_state(struct vcpu *vcpu)
 	/* ES, CS, SS, DS, FS, GS */
 	/***************************************************/
 	data32_idx = 0x10;
-	if (get_vcpu_mode(vcpu) == REAL_MODE) {
+	if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
 		es = ss = ds = fs = gs = data32_idx;
 		limit = 0xffff;
-	} else if (get_vcpu_mode(vcpu) == PAGE_PROTECTED_MODE) {
+	} else if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
 		asm volatile ("movw %%es, %%ax":"=a" (es));
 		asm volatile ("movw %%ss, %%ax":"=a" (ss));
 		asm volatile ("movw %%ds, %%ax":"=a" (ds));
@@ -496,9 +496,9 @@ static void init_guest_state(struct vcpu *vcpu)
 	pr_dbg("VMX_GUEST_GS_LIMIT: 0x%x ", limit);
 
 	/* Access */
-	if (get_vcpu_mode(vcpu) == REAL_MODE)
+	if (get_vcpu_mode(vcpu) == CPU_MODE_REAL)
 		value32 = 0x0093;
-	else if (get_vcpu_mode(vcpu) == PAGE_PROTECTED_MODE)
+	else if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT)
 		value32 = 0xc093;
 
 	field = VMX_GUEST_ES_ATTR;
@@ -609,7 +609,7 @@ static void init_guest_state(struct vcpu *vcpu)
 	pr_dbg("VMX_GUEST_IA32_PAT: 0x%016llx ",
 			value64);
 
-	if (get_vcpu_mode(vcpu) == REAL_MODE) {
+	if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
 		/* Disable long mode (clear IA32_EFER.LME) in VMCS IA32_EFER
 		 * MSR
 		 */
@@ -1024,7 +1024,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
 	fixed0 = msr_read(MSR_IA32_VMX_CR0_FIXED0);
 	fixed1 = msr_read(MSR_IA32_VMX_CR0_FIXED1);
 
-	if (get_vcpu_mode(vcpu) == REAL_MODE) {
+	if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
 		/* Check to see if unrestricted guest support is available */
 		if (msr_read(MSR_IA32_VMX_MISC) & (1 << 5)) {
 			/* Adjust fixed bits as they can/will reflect incorrect
@@ -1047,7 +1047,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
 	}
-	/* (get_vcpu_mode(vcpu) == REAL_MODE) */
+	/* (get_vcpu_mode(vcpu) == CPU_MODE_REAL) */
 
 	/* Output fixed CR0 values */
 	pr_dbg("Fixed0 CR0 value: 0x%x", fixed0);
 	pr_dbg("Fixed1 CR0 value: 0x%x", fixed1);
@@ -1129,7 +1129,7 @@ static void init_entry_ctrl(__unused struct vcpu *vcpu)
 	 * IA32_PAT and IA32_EFER
 	 */
 	value32 = msr_read(MSR_IA32_VMX_ENTRY_CTLS);
-	if (get_vcpu_mode(vcpu) == PAGE_PROTECTED_MODE)
+	if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT)
 		value32 |= (VMX_ENTRY_CTLS_IA32E_MODE);
 
 	value32 |= (VMX_ENTRY_CTLS_LOAD_EFER |
@@ -1197,7 +1197,7 @@ static void override_uefi_vmcs(struct vcpu *vcpu)
 	struct run_context *cur_context =
 		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
 
-	if (get_vcpu_mode(vcpu) == PAGE_PROTECTED_MODE) {
+	if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
 		/* Set up guest CR0 field */
 		field = VMX_GUEST_CR0;
 		cur_context->cr0 = efi_ctx->cr0 | CR0_PG | CR0_PE | CR0_NE;

@@ -91,6 +91,20 @@ struct vm_lu_mem_map {
 	uint64_t	size;		/* Size of map */
 };
 
+enum vm_cpu_mode {
+	CPU_MODE_REAL,
+	CPU_MODE_PROTECTED,
+	CPU_MODE_COMPATIBILITY,		/* IA-32E mode (CS.L = 0) */
+	CPU_MODE_64BIT,			/* IA-32E mode (CS.L = 1) */
+};
+
+enum vm_paging_mode {
+	PAGING_MODE_FLAT,
+	PAGING_MODE_32,
+	PAGING_MODE_PAE,
+	PAGING_MODE_64,
+};
+
 /*
  * VM related APIs
  */
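The new vm_paging_mode enum follows the same pattern. Assuming PAGING_MODE_FLAT denotes paging disabled, the mode can be derived from CR0.PG, CR4.PAE and IA32_EFER.LMA; the sketch below is illustrative only (classify_paging_mode and the bit masks are not ACRN APIs).

#include <stdint.h>

#define CR0_PG_BIT	(1UL << 31)	/* CR0.PG: paging enable */
#define CR4_PAE_BIT	(1UL << 5)	/* CR4.PAE: physical address extension */
#define EFER_LMA_BIT	(1UL << 10)	/* IA32_EFER.LMA: long mode active */

enum vm_paging_mode {
	PAGING_MODE_FLAT,	/* paging disabled */
	PAGING_MODE_32,		/* 32-bit paging */
	PAGING_MODE_PAE,	/* PAE paging */
	PAGING_MODE_64,		/* 4-level paging */
};

/* Derive the guest paging mode from raw control-register values. */
static enum vm_paging_mode classify_paging_mode(uint64_t cr0, uint64_t cr4,
		uint64_t efer)
{
	if ((cr0 & CR0_PG_BIT) == 0)
		return PAGING_MODE_FLAT;
	if ((cr4 & CR4_PAE_BIT) == 0)
		return PAGING_MODE_32;
	if ((efer & EFER_LMA_BIT) == 0)
		return PAGING_MODE_PAE;
	return PAGING_MODE_64;
}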

@@ -453,10 +453,6 @@
 			CR4_VMXE | CR4_SMXE | CR4_PGE | CR4_PCIDE)
 #define CR4_READ_SHADOW (CR4_PGE | CR4_PSE)
 
-/* VCPU config definitions */
-#define REAL_MODE		1
-#define PAGE_PROTECTED_MODE	2
-
 /* External Interfaces */
 int exec_vmxon_instr(void);
 uint64_t exec_vmread(uint32_t field);