Mirror of https://github.com/projectacrn/acrn-hypervisor.git
hv: define 4 vcpu modes
move enum vm_cpu_mode to guest.h
move enum vm_paging_mode to guest.h
replace REAL_MODE with CPU_MODE_REAL
replace PAGE_PROTECTED_MODE with CPU_MODE_64BIT

Signed-off-by: Binbin Wu <binbin.wu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
parent cb262286c6
commit 744e09bc7e
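The four CPU modes follow the IA-32 operating modes: real mode, protected mode, IA-32e compatibility mode (CS.L = 0) and IA-32e 64-bit mode (CS.L = 1). As an illustration of what the enumerators encode, a vCPU's mode could be classified from its CR0, IA32_EFER and CS access-rights state roughly as below; this sketch is not part of the commit, and the helper and struct names are hypothetical.

/*
 * Sketch only: classify a vCPU's operating mode from guest state.
 * The guest_state struct and classify_cpu_mode() helper are hypothetical,
 * not code from this commit.
 */
#include <stdint.h>

#define CR0_PE          (1UL << 0)      /* CR0.PE: protection enable */
#define EFER_LMA        (1UL << 10)     /* IA32_EFER.LMA: IA-32e mode active */
#define CS_ATTR_L       (1U << 13)      /* CS.L bit in the VMX access-rights format */

enum vm_cpu_mode {
        CPU_MODE_REAL,
        CPU_MODE_PROTECTED,
        CPU_MODE_COMPATIBILITY,         /* IA-32E mode (CS.L = 0) */
        CPU_MODE_64BIT,                 /* IA-32E mode (CS.L = 1) */
};

struct guest_state {                    /* hypothetical container for the sketch */
        uint64_t cr0;
        uint64_t ia32_efer;
        uint32_t cs_attr;               /* CS access rights as stored in the VMCS */
};

static enum vm_cpu_mode classify_cpu_mode(const struct guest_state *gs)
{
        if ((gs->cr0 & CR0_PE) == 0UL)
                return CPU_MODE_REAL;

        if ((gs->ia32_efer & EFER_LMA) != 0UL)  /* IA-32e mode is active */
                return ((gs->cs_attr & CS_ATTR_L) != 0U) ?
                        CPU_MODE_64BIT : CPU_MODE_COMPATIBILITY;

        return CPU_MODE_PROTECTED;
}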
@@ -124,20 +124,6 @@ struct seg_desc {
 #define SEG_DESC_GRANULARITY(access)    (((access) & 0x8000) ? 1 : 0)
 #define SEG_DESC_UNUSABLE(access)       (((access) & 0x10000) ? 1 : 0)
 
-enum vm_cpu_mode {
-        CPU_MODE_REAL,
-        CPU_MODE_PROTECTED,
-        CPU_MODE_COMPATIBILITY,         /* IA-32E mode (CS.L = 0) */
-        CPU_MODE_64BIT,                 /* IA-32E mode (CS.L = 1) */
-};
-
-enum vm_paging_mode {
-        PAGING_MODE_FLAT,
-        PAGING_MODE_32,
-        PAGING_MODE_PAE,
-        PAGING_MODE_64,
-};
-
 struct vm_guest_paging {
         uint64_t cr3;
         int cpl;
@@ -265,9 +265,9 @@ void reset_vcpu(struct vcpu *vcpu)
 void init_vcpu(struct vcpu *vcpu)
 {
         if (is_vcpu_bsp(vcpu))
-                vcpu->arch_vcpu.cpu_mode = PAGE_PROTECTED_MODE;
+                vcpu->arch_vcpu.cpu_mode = CPU_MODE_64BIT;
         else
-                vcpu->arch_vcpu.cpu_mode = REAL_MODE;
+                vcpu->arch_vcpu.cpu_mode = CPU_MODE_REAL;
         /* init_vmcs is delayed to vcpu vmcs launch first time */
 }
 
@@ -336,9 +336,9 @@ int prepare_vcpu(struct vm *vm, int pcpu_id)
                 if (!vm_sw_loader)
                         vm_sw_loader = general_sw_loader;
                 vm_sw_loader(vm, vcpu);
-                vcpu->arch_vcpu.cpu_mode = PAGE_PROTECTED_MODE;
+                vcpu->arch_vcpu.cpu_mode = CPU_MODE_64BIT;
         } else {
-                vcpu->arch_vcpu.cpu_mode = REAL_MODE;
+                vcpu->arch_vcpu.cpu_mode = CPU_MODE_REAL;
         }
 
         /* init_vmcs is delayed to vcpu vmcs launch first time */
@@ -1065,7 +1065,7 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic)
                 if (--target_vcpu->arch_vcpu.nr_sipi > 0)
                         return 0;
 
-                target_vcpu->arch_vcpu.cpu_mode = REAL_MODE;
+                target_vcpu->arch_vcpu.cpu_mode = CPU_MODE_REAL;
                 target_vcpu->arch_vcpu.sipi_vector = vec;
                 pr_err("Start Secondary VCPU%d for VM[%d]...",
                                 target_vcpu->vcpu_id,
@@ -236,7 +236,7 @@ static int write_cr0(struct vcpu *vcpu, uint64_t value)
          * transition from real mode to paged-protected mode
          */
         if (!is_vcpu_bsp(vcpu) &&
-                (vcpu->arch_vcpu.cpu_mode == REAL_MODE) &&
+                (vcpu->arch_vcpu.cpu_mode == CPU_MODE_REAL) &&
                 (value & CR0_PG) && (value & CR0_PE)) {
                 /* Enable protected mode */
                 value32 = exec_vmread(VMX_ENTRY_CONTROLS);
@@ -264,12 +264,12 @@ static void init_guest_state(struct vcpu *vcpu)
 
         /* Setup guest control register values */
         /* Set up guest CRO field */
-        if (get_vcpu_mode(vcpu) == REAL_MODE) {
+        if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
                 /*cur_context->cr0 = (CR0_CD | CR0_NW | CR0_ET | CR0_NE);*/
                 cur_context->cr0 = CR0_ET | CR0_NE;
                 cur_context->cr3 = 0;
                 cur_context->cr4 = CR4_VMXE;
-        } else if (get_vcpu_mode(vcpu) == PAGE_PROTECTED_MODE) {
+        } else if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
                 cur_context->cr0 = ((uint64_t)CR0_PG | CR0_PE | CR0_NE);
                 cur_context->cr4 = ((uint64_t)CR4_PSE | CR4_PAE | CR4_MCE | CR4_VMXE);
                 cur_context->cr3 = vm->arch_vm.guest_init_pml4 | CR3_PWT;
@@ -303,7 +303,7 @@ static void init_guest_state(struct vcpu *vcpu)
         /***************************************************/
         /* Set Code Segment - CS */
         /***************************************************/
-        if (get_vcpu_mode(vcpu) == REAL_MODE) {
+        if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
                 /* AP is initialized with real mode
                  * and CS value is left shift 8 bits from sipi vector;
                  */
@@ -343,7 +343,7 @@ static void init_guest_state(struct vcpu *vcpu)
         /***************************************************/
         /* Set up guest instruction pointer */
         field = VMX_GUEST_RIP;
-        if (get_vcpu_mode(vcpu) == REAL_MODE)
+        if (get_vcpu_mode(vcpu) == CPU_MODE_REAL)
                 value32 = 0;
         else
                 value32 = (uint32_t) ((uint64_t) vcpu->entry_addr & 0xFFFFFFFF);
@@ -351,7 +351,7 @@ static void init_guest_state(struct vcpu *vcpu)
         pr_dbg("GUEST RIP on VMEntry %x ", value32);
         exec_vmwrite(field, value32);
 
-        if (get_vcpu_mode(vcpu) == PAGE_PROTECTED_MODE) {
+        if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
                 /* Set up guest stack pointer to 0 */
                 field = VMX_GUEST_RSP;
                 value32 = 0;
@@ -365,13 +365,13 @@ static void init_guest_state(struct vcpu *vcpu)
         /***************************************************/
 
         /* GDTR - Global Descriptor Table */
-        if (get_vcpu_mode(vcpu) == REAL_MODE) {
+        if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
                 /* Base */
                 base = 0;
 
                 /* Limit */
                 limit = 0xFFFF;
-        } else if (get_vcpu_mode(vcpu) == PAGE_PROTECTED_MODE) {
+        } else if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
                 descriptor_table gdtb = {0, 0};
 
                 /* Base *//* TODO: Should guest GDTB point to host GDTB ? */
@@ -400,13 +400,13 @@ static void init_guest_state(struct vcpu *vcpu)
         pr_dbg("VMX_GUEST_GDTR_LIMIT: 0x%x ", limit);
 
         /* IDTR - Interrupt Descriptor Table */
-        if (get_vcpu_mode(vcpu) == REAL_MODE) {
+        if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
                 /* Base */
                 base = 0;
 
                 /* Limit */
                 limit = 0xFFFF;
-        } else if (get_vcpu_mode(vcpu) == PAGE_PROTECTED_MODE) {
+        } else if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
                 descriptor_table idtb = {0, 0};
 
                 /* TODO: Should guest IDTR point to host IDTR ? */
@@ -444,11 +444,11 @@ static void init_guest_state(struct vcpu *vcpu)
         /* ES, CS, SS, DS, FS, GS */
         /***************************************************/
         data32_idx = 0x10;
-        if (get_vcpu_mode(vcpu) == REAL_MODE) {
+        if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
                 es = ss = ds = fs = gs = data32_idx;
                 limit = 0xffff;
 
-        } else if (get_vcpu_mode(vcpu) == PAGE_PROTECTED_MODE) {
+        } else if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
                 asm volatile ("movw %%es, %%ax":"=a" (es));
                 asm volatile ("movw %%ss, %%ax":"=a" (ss));
                 asm volatile ("movw %%ds, %%ax":"=a" (ds));
@@ -496,9 +496,9 @@ static void init_guest_state(struct vcpu *vcpu)
         pr_dbg("VMX_GUEST_GS_LIMIT: 0x%x ", limit);
 
         /* Access */
-        if (get_vcpu_mode(vcpu) == REAL_MODE)
+        if (get_vcpu_mode(vcpu) == CPU_MODE_REAL)
                 value32 = 0x0093;
-        else if (get_vcpu_mode(vcpu) == PAGE_PROTECTED_MODE)
+        else if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT)
                 value32 = 0xc093;
 
         field = VMX_GUEST_ES_ATTR;
@@ -609,7 +609,7 @@ static void init_guest_state(struct vcpu *vcpu)
         pr_dbg("VMX_GUEST_IA32_PAT: 0x%016llx ",
                         value64);
 
-        if (get_vcpu_mode(vcpu) == REAL_MODE) {
+        if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
                 /* Disable long mode (clear IA32_EFER.LME) in VMCS IA32_EFER
                  * MSR
                  */
@@ -1024,7 +1024,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
         fixed0 = msr_read(MSR_IA32_VMX_CR0_FIXED0);
         fixed1 = msr_read(MSR_IA32_VMX_CR0_FIXED1);
 
-        if (get_vcpu_mode(vcpu) == REAL_MODE) {
+        if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
                 /* Check to see if unrestricted guest support is available */
                 if (msr_read(MSR_IA32_VMX_MISC) & (1 << 5)) {
                         /* Adjust fixed bits as they can/will reflect incorrect
@@ -1047,7 +1047,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
 
         }
 
-        /* (get_vcpu_mode(vcpu) == REAL_MODE) */
+        /* (get_vcpu_mode(vcpu) == CPU_MODE_REAL) */
         /* Output fixed CR0 values */
         pr_dbg("Fixed0 CR0 value: 0x%x", fixed0);
         pr_dbg("Fixed1 CR0 value: 0x%x", fixed1);
@@ -1129,7 +1129,7 @@ static void init_entry_ctrl(__unused struct vcpu *vcpu)
          * IA32_PAT and IA32_EFER
          */
         value32 = msr_read(MSR_IA32_VMX_ENTRY_CTLS);
-        if (get_vcpu_mode(vcpu) == PAGE_PROTECTED_MODE)
+        if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT)
                 value32 |= (VMX_ENTRY_CTLS_IA32E_MODE);
 
         value32 |= (VMX_ENTRY_CTLS_LOAD_EFER |
@@ -1197,7 +1197,7 @@ static void override_uefi_vmcs(struct vcpu *vcpu)
         struct run_context *cur_context =
                 &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
 
-        if (get_vcpu_mode(vcpu) == PAGE_PROTECTED_MODE) {
+        if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
                 /* Set up guest CR0 field */
                 field = VMX_GUEST_CR0;
                 cur_context->cr0 = efi_ctx->cr0 | CR0_PG | CR0_PE | CR0_NE;
@@ -91,6 +91,20 @@ struct vm_lu_mem_map {
         uint64_t size;  /* Size of map */
 };
 
+enum vm_cpu_mode {
+        CPU_MODE_REAL,
+        CPU_MODE_PROTECTED,
+        CPU_MODE_COMPATIBILITY,         /* IA-32E mode (CS.L = 0) */
+        CPU_MODE_64BIT,                 /* IA-32E mode (CS.L = 1) */
+};
+
+enum vm_paging_mode {
+        PAGING_MODE_FLAT,
+        PAGING_MODE_32,
+        PAGING_MODE_PAE,
+        PAGING_MODE_64,
+};
+
 /*
  * VM related APIs
  */
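For reference, the four paging modes line up with the guest's CR0.PG, CR4.PAE and IA32_EFER.LMA settings. A minimal sketch of how they could be told apart follows; it is not code from this commit, and the classify_paging_mode() helper name is hypothetical.

/*
 * Sketch only: map guest CR0/CR4/IA32_EFER bits to a vm_paging_mode value.
 * The helper name is hypothetical, not part of this commit.
 */
#include <stdint.h>

#define CR0_PG          (1UL << 31)     /* CR0.PG: paging enable */
#define CR4_PAE         (1UL << 5)      /* CR4.PAE: physical address extension */
#define EFER_LMA        (1UL << 10)     /* IA32_EFER.LMA: IA-32e mode active */

enum vm_paging_mode {
        PAGING_MODE_FLAT,
        PAGING_MODE_32,
        PAGING_MODE_PAE,
        PAGING_MODE_64,
};

static enum vm_paging_mode classify_paging_mode(uint64_t cr0, uint64_t cr4,
                uint64_t ia32_efer)
{
        if ((cr0 & CR0_PG) == 0UL)
                return PAGING_MODE_FLAT;        /* paging disabled */

        if ((cr4 & CR4_PAE) == 0UL)
                return PAGING_MODE_32;          /* 32-bit, 2-level paging */

        if ((ia32_efer & EFER_LMA) != 0UL)
                return PAGING_MODE_64;          /* 4-level IA-32e paging */

        return PAGING_MODE_PAE;                 /* PAE, 3-level paging */
}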
@@ -453,10 +453,6 @@
                 CR4_VMXE | CR4_SMXE | CR4_PGE | CR4_PCIDE)
 #define CR4_READ_SHADOW (CR4_PGE | CR4_PSE)
 
-/* VCPU config definitions */
-#define REAL_MODE               1
-#define PAGE_PROTECTED_MODE     2
-
 /* External Interfaces */
 int exec_vmxon_instr(void);
 uint64_t exec_vmread(uint32_t field);