hv: init: unify init logic for vm0 bsp

In the current code, the VM0 BSP start mode is hardcoded. With this patch,
the start mode is decided by the boot context prepared by the bootloader/BIOS.

In the current code, the VM0 BSP VMCS is overridden only on the UEFI platform.
With this patch, the VM0 BSP VMCS is overridden on both SBL and UEFI platforms.

Also restructure the guest init code: a vcpu run_context is initialized first
according to the vcpu mode, and the values are then written to the VMCS from
that run_context.

Signed-off-by: Binbin Wu <binbin.wu@intel.com>
Reviewed-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Binbin Wu 2018-07-28 23:03:45 +08:00 committed by lijinxia
parent 4acce9338a
commit 33e1149b29
8 changed files with 259 additions and 642 deletions
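
For orientation, the reworked init_guest_state() in the vmx.c hunk below reduces to the following dispatch. This is a simplified sketch assembled from the added lines, not the verbatim patch; all names come from the diff itself.

static void init_guest_state(struct vcpu *vcpu)
{
	struct boot_ctx *init_ctx = (struct boot_ctx *)vm0_boot_context;
	enum vm_cpu_mode vcpu_mode = get_vcpu_mode(vcpu);

	if (vcpu_mode == CPU_MODE_REAL) {
		/* real mode: UOS start or SOS resume from S3 */
		init_guest_context_real(vcpu);
		init_guest_vmx(vcpu, CR0_ET | CR0_NE, 0UL, 0UL);
	} else if (is_vm0(vcpu->vm) && is_vcpu_bsp(vcpu)) {
		/* VM0 BSP: reuse the state captured by the bootloader/BIOS */
		init_guest_context_vm0_bsp(vcpu);
		init_guest_vmx(vcpu, init_ctx->cr0, init_ctx->cr3,
				init_ctx->cr4 & ~CR4_VMXE);
	} else {
		/* UOS BSP started directly in protected mode */
		init_guest_context_protect(vcpu);
		init_guest_vmx(vcpu, CR0_ET | CR0_NE | CR0_PE, 0UL, 0UL);
	}
}

The init_guest_context_*() helpers only fill the vcpu's run_context/ext_context; init_guest_vmx() then writes those values into the VMCS, which is what lets the SBL and UEFI paths share one code path without a separate override_uefi_vmcs() step.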

View File

@ -6,11 +6,7 @@
#include <hypervisor.h>
#include <schedule.h>
#ifdef CONFIG_EFI_STUB
#include <vm0_boot.h>
extern struct efi_ctx* efi_ctx;
#endif
vm_sw_loader_t vm_sw_loader;
@ -503,14 +499,18 @@ int prepare_vcpu(struct vm *vm, uint16_t pcpu_id)
if (!vm_sw_loader)
vm_sw_loader = general_sw_loader;
if (is_vm0(vcpu->vm)) {
vcpu->arch_vcpu.cpu_mode = CPU_MODE_PROTECTED;
#ifdef CONFIG_EFI_STUB
if ((efi_ctx->efer & MSR_IA32_EFER_LMA_BIT) &&
(efi_ctx->cs_ar & 0x2000))
struct boot_ctx *vm0_init_ctx =
(struct boot_ctx *)vm0_boot_context;
/* The VM0 BSP start mode is decided by the boot context
 * set up by the bootloader/BIOS */
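/* IA32_EFER.LMA together with CS.L (bit 13, 0x2000, of the VMX
 * segment access-rights format) means the bootloader handed over the
 * BSP in 64-bit mode; CR0.PE alone means protected mode. */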
if ((vm0_init_ctx->ia32_efer & MSR_IA32_EFER_LMA_BIT) &&
(vm0_init_ctx->cs_ar & 0x2000)) {
vcpu->arch_vcpu.cpu_mode = CPU_MODE_64BIT;
#elif CONFIG_START_VM0_BSP_64BIT
vcpu->arch_vcpu.cpu_mode = CPU_MODE_64BIT;
#endif
} else if (vm0_init_ctx->cr0 & CR0_PE) {
vcpu->arch_vcpu.cpu_mode = CPU_MODE_PROTECTED;
} else {
return -EINVAL;
}
} else {
#ifdef CONFIG_EFI_STUB
/* currently non-vm0 will boot kernel directly */

View File

@ -5,9 +5,9 @@
*/
#include <hypervisor.h>
#ifdef CONFIG_EFI_STUB
#include <vm0_boot.h>
extern struct efi_ctx* efi_ctx;
#ifdef CONFIG_EFI_STUB
extern struct boot_ctx* efi_ctx;
#endif
#define REAL_MODE_BSP_INIT_CODE_SEL (0xf000U)
@ -15,6 +15,9 @@ extern struct efi_ctx* efi_ctx;
#define REAL_MODE_CODE_SEG_AR (0x009fU)
#define PROTECTED_MODE_DATA_SEG_AR (0xc093U)
#define PROTECTED_MODE_CODE_SEG_AR (0xc09bU)
#define DR7_INIT_VALUE (0x400UL)
#define LDTR_AR (0x0082U) /* LDT, type must be 2, refer to SDM Vol3 26.3.1.2 */
#define TR_AR (0x008bU) /* TSS (busy), refer to SDM Vol3 26.3.1.2 */
static uint64_t cr0_host_mask;
static uint64_t cr0_always_on_mask;
@ -243,24 +246,6 @@ void exec_vmwrite16(uint32_t field, uint16_t value)
exec_vmwrite64(field, (uint64_t)value);
}
#define HV_ARCH_VMX_GET_CS(SEL) \
{ \
asm volatile ("movw %%cs, %%ax" : "=a"(sel)); \
}
static uint32_t get_cs_access_rights(void)
{
uint32_t usable_ar;
uint16_t sel_value;
asm volatile ("movw %%cs, %%ax" : "=a" (sel_value));
asm volatile ("lar %%eax, %%eax" : "=a" (usable_ar) : "a"(sel_value));
usable_ar = usable_ar >> 8U;
usable_ar &= 0xf0ffU; /* clear bits 11:8 */
return usable_ar;
}
static void init_cr0_cr4_host_mask(__unused struct vcpu *vcpu)
{
static bool inited = false;
@ -562,427 +547,195 @@ int vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4)
return 0;
}
static void init_guest_context_real(struct vcpu *vcpu)
{
struct ext_context *ectx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ext_ctx;
/* cs, ss, ds, es, fs, gs; cs will be overridden later. */
for(struct segment_sel *seg = &ectx->cs; seg <= &ectx->gs; seg++) {
seg->selector = 0U;
seg->base = 0UL;
seg->limit = 0xFFFFU;
seg->attr = REAL_MODE_DATA_SEG_AR;
}
if (is_vcpu_bsp(vcpu)) {
/* There are two cases in which the BSP starts in real
 * mode:
 * 1. UOS start
 * 2. SOS resume from S3
 *
 * For 1, the DM sets the correct entry_addr.
 * For 2, the SOS resume caller sets entry_addr to the
 * SOS wakeup vector. Per the ACPI FACS spec, the
 * wakeup vector must be below 1MB, so entry_addr < 1MB
 * is used to detect a resume from S3, in which case
 * CS:IP is set to
 * (wakeup_vec >> 4):(wakeup_vec & 0x000F).
 */
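/* Worked example with a hypothetical wakeup vector: wakeup_vec =
 * 0x9A510 gives cs.selector = 0x9A51, cs.base = 0x9A510 and RIP = 0x0,
 * i.e. the guest resumes at 0x9A51:0x0000. */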
if ((uint64_t)vcpu->entry_addr < 0x100000UL) {
ectx->cs.selector = (uint16_t)
(((uint64_t)vcpu->entry_addr & 0xFFFF0UL) >> 4U);
ectx->cs.base = (uint64_t)ectx->cs.selector << 4U;
vcpu_set_rip(vcpu, (uint64_t)vcpu->entry_addr & 0x0FUL);
} else {
/* BSP is initialized with real mode */
ectx->cs.selector = REAL_MODE_BSP_INIT_CODE_SEL;
/* For unrestricted guest, it is able
* to set a high base address
*/
ectx->cs.base = (uint64_t)vcpu->entry_addr & 0xFFFF0000UL;
vcpu_set_rip(vcpu, 0x0000FFF0UL);
}
} else {
/* An AP is initialized in real mode; its CS selector is
 * the SIPI vector shifted left by 8 bits.
 */
ectx->cs.selector = (uint16_t)(vcpu->arch_vcpu.sipi_vector << 8U);
ectx->cs.base = (uint64_t)ectx->cs.selector << 4U;
}
ectx->cs.attr = REAL_MODE_CODE_SEG_AR;
ectx->gdtr.base = 0UL;
ectx->gdtr.limit = 0xFFFFU;
ectx->idtr.base = 0UL;
ectx->idtr.limit = 0xFFFFU;
}
static void init_guest_context_vm0_bsp(struct vcpu *vcpu)
{
struct ext_context *ectx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ext_ctx;
struct boot_ctx * init_ctx = (struct boot_ctx *)vm0_boot_context;
uint16_t *sel;
struct segment_sel *seg;
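/* The paired walk below relies on cs..gs having the same order in
 * ext_context and in boot_ctx (see the "align the order to ext_context"
 * note in vm0_boot.h). */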
for(seg = &ectx->cs, sel = &init_ctx->cs_sel;
seg <= &ectx->gs; seg ++, sel++) {
seg->base = 0UL;
seg->limit = 0xFFFFFFFFU;
seg->attr = PROTECTED_MODE_DATA_SEG_AR;
seg->selector = *sel;
}
ectx->cs.attr = init_ctx->cs_ar; /* override cs attr */
vcpu_set_rip(vcpu, (uint64_t)vcpu->entry_addr);
vcpu_set_efer(vcpu, init_ctx->ia32_efer);
ectx->gdtr.base = init_ctx->gdt.base;
ectx->gdtr.limit = init_ctx->gdt.limit;
ectx->idtr.base = init_ctx->idt.base;
ectx->idtr.limit = init_ctx->idt.limit;
ectx->ldtr.selector = init_ctx->ldt_sel;
ectx->tr.selector = init_ctx->tr_sel;
#ifdef CONFIG_EFI_STUB
vcpu_set_rsp(vcpu, efi_ctx->gprs.rsp);
/* clear flags for CF/PF/AF/ZF/SF/OF */
vcpu_set_rflags(vcpu, efi_ctx->rflags & ~(0x8d5UL));
#endif
}
/* Only called for a UOS whose BSP starts in protected mode */
static void init_guest_context_protect(struct vcpu *vcpu)
{
struct ext_context *ectx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ext_ctx;
struct segment_sel *seg;
ectx->gdtr.base = create_guest_init_gdt(vcpu->vm, &ectx->gdtr.limit);
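/* Selectors 0x10 (code) and 0x18 (data) below index the flat guest
 * init GDT built by create_guest_init_gdt(). */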
for(seg = &ectx->cs; seg <= &ectx->gs; seg ++) {
seg->base = 0UL;
seg->limit = 0xFFFFFFFFU;
seg->attr = PROTECTED_MODE_DATA_SEG_AR;
seg->selector = 0x18U;
}
ectx->cs.attr = PROTECTED_MODE_CODE_SEG_AR;
ectx->cs.selector = 0x10U; /* Linear code segment */
vcpu_set_rip(vcpu, (uint64_t)vcpu->entry_addr);
}
/* rip, rsp, ia32_efer and rflags are written to VMCS in start_vcpu */
static void init_guest_vmx(struct vcpu *vcpu, uint64_t cr0, uint64_t cr3,
uint64_t cr4)
{
struct cpu_context *ctx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
struct ext_context *ectx = &ctx->ext_ctx;
vcpu_set_cr4(vcpu, cr4);
vcpu_set_cr0(vcpu, cr0);
exec_vmwrite(VMX_GUEST_CR3, cr3);
exec_vmwrite(VMX_GUEST_GDTR_BASE, ectx->gdtr.base);
pr_dbg("VMX_GUEST_GDTR_BASE: 0x%016llx", ectx->gdtr.base);
exec_vmwrite32(VMX_GUEST_GDTR_LIMIT, ectx->gdtr.limit);
pr_dbg("VMX_GUEST_GDTR_LIMIT: 0x%016llx", ectx->gdtr.limit);
exec_vmwrite(VMX_GUEST_IDTR_BASE, ectx->idtr.base);
pr_dbg("VMX_GUEST_IDTR_BASE: 0x%016llx", ectx->idtr.base);
exec_vmwrite32(VMX_GUEST_IDTR_LIMIT, ectx->idtr.limit);
pr_dbg("VMX_GUEST_IDTR_LIMIT: 0x%016llx", ectx->idtr.limit);
/* init segment selectors: es, cs, ss, ds, fs, gs, ldtr, tr */
load_segment(ectx->cs, VMX_GUEST_CS);
load_segment(ectx->ss, VMX_GUEST_SS);
load_segment(ectx->ds, VMX_GUEST_DS);
load_segment(ectx->es, VMX_GUEST_ES);
load_segment(ectx->fs, VMX_GUEST_FS);
load_segment(ectx->gs, VMX_GUEST_GS);
load_segment(ectx->tr, VMX_GUEST_TR);
load_segment(ectx->ldtr, VMX_GUEST_LDTR);
/* fixed values */
exec_vmwrite32(VMX_GUEST_IA32_SYSENTER_CS, 0U);
exec_vmwrite(VMX_GUEST_IA32_SYSENTER_ESP, 0UL);
exec_vmwrite(VMX_GUEST_IA32_SYSENTER_EIP, 0UL);
exec_vmwrite(VMX_GUEST_PENDING_DEBUG_EXCEPT, 0UL);
exec_vmwrite(VMX_GUEST_IA32_DEBUGCTL_FULL, 0UL);
exec_vmwrite32(VMX_GUEST_INTERRUPTIBILITY_INFO, 0U);
exec_vmwrite32(VMX_GUEST_ACTIVITY_STATE, 0U);
exec_vmwrite32(VMX_GUEST_SMBASE, 0U);
vcpu_set_pat_ext(vcpu, PAT_POWER_ON_VALUE);
exec_vmwrite(VMX_GUEST_IA32_PAT_FULL, PAT_POWER_ON_VALUE);
exec_vmwrite(VMX_GUEST_DR7, DR7_INIT_VALUE);
}
static void init_guest_state(struct vcpu *vcpu)
{
uint16_t value16;
uint32_t value32;
uint64_t value64;
uint16_t sel;
uint32_t limit, access;
uint64_t base;
uint16_t ldt_idx = 0x38U;
uint16_t es = 0U, ss = 0U, ds = 0U, fs = 0U, gs = 0U, data32_idx;
uint16_t tr_sel = 0x70U;
struct vm *vm = vcpu->vm;
struct cpu_context *ctx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
struct boot_ctx * init_ctx = (struct boot_ctx *)vm0_boot_context;
enum vm_cpu_mode vcpu_mode = get_vcpu_mode(vcpu);
pr_dbg("*********************");
pr_dbg("Initialize guest state");
pr_dbg("*********************");
/* Will not init vcpu mode to compatibility mode */
ASSERT(vcpu_mode != CPU_MODE_COMPATIBILITY,
"don't support start vcpu from compatibility mode");
/*************************************************/
/* Set up CRx */
/*************************************************/
pr_dbg("Natural-width********");
if (vcpu_mode == CPU_MODE_64BIT) {
vcpu_set_efer(vcpu, MSR_IA32_EFER_LME_BIT);
}
/* Setup guest control register values
* cr4 should be set before cr0, because when set cr0, cr4 value will be
* checked.
*/
if (vcpu_mode == CPU_MODE_REAL) {
vcpu_set_cr4(vcpu, 0UL);
exec_vmwrite(VMX_GUEST_CR3, 0UL);
vcpu_set_cr0(vcpu, CR0_ET | CR0_NE);
} else if (vcpu_mode == CPU_MODE_PROTECTED) {
vcpu_set_cr4(vcpu, 0UL);
exec_vmwrite(VMX_GUEST_CR3, 0UL);
vcpu_set_cr0(vcpu, CR0_ET | CR0_NE | CR0_PE);
} else if (vcpu_mode == CPU_MODE_64BIT) {
vcpu_set_cr4(vcpu, CR4_PSE | CR4_PAE | CR4_MCE);
exec_vmwrite(VMX_GUEST_CR3,
vm->arch_vm.guest_init_pml4 | CR3_PWT);
vcpu_set_cr0(vcpu, CR0_PG | CR0_PE | CR0_NE);
} else {
/* vcpu_mode will never be CPU_MODE_COMPATIBILITY */
}
/***************************************************/
/* Set up Flags - the value of RFLAGS on VM entry */
/***************************************************/
vcpu_set_rflags(vcpu, 0x2UL); /* Bit 1 is a active high reserved bit */
pr_dbg("VMX_GUEST_RFLAGS: 0x%016llx ", vcpu_get_rflags(vcpu));
/***************************************************/
/* Set Code Segment - CS */
/***************************************************/
if (vcpu_mode == CPU_MODE_REAL) {
if (is_vcpu_bsp(vcpu)) {
/* There are two cases that we will start bsp in real
* mode:
* 1. UOS start
* 2. SOS resume from S3
*
* For 1, DM will set correct entry_addr.
* For 2, SOS resume caller will set entry_addr to
* SOS wakeup vec. According to ACPI FACS spec,
* wakeup vec should be < 1MB. So we use < 1MB
* to detect whether it's resume from S3 and we
* setup CS:IP to
* (wakeup_vec >> 4):(wakeup_vec & 0x000F)
* if it's resume from S3.
*
*/
if ((uint64_t)vcpu->entry_addr < 0x100000UL) {
sel = (uint16_t)(((uint64_t)vcpu->entry_addr & 0xFFFF0UL)
>> 4U);
base = (uint64_t)sel << 4U;
} else {
/* BSP is initialized with real mode */
sel = REAL_MODE_BSP_INIT_CODE_SEL;
/* For unrestricted guest, it is able
* to set a high base address
*/
base = (uint64_t)vcpu->entry_addr &
0xFFFF0000UL;
}
} else {
/* AP is initialized with real mode
* and CS value is left shift 8 bits from sipi vector.
*/
sel = (uint16_t)(vcpu->arch_vcpu.sipi_vector << 8U);
base = (uint64_t)sel << 4U;
}
limit = 0xffffU;
access = REAL_MODE_CODE_SEG_AR;
} else if (vcpu_mode == CPU_MODE_PROTECTED) {
limit = 0xffffffffU;
base = 0UL;
access = PROTECTED_MODE_CODE_SEG_AR;
sel = 0x10U; /* Linear CS selector in guest init gdt */
/* ldtr */
ctx->ext_ctx.ldtr.selector = 0U;
ctx->ext_ctx.ldtr.base = 0UL;
ctx->ext_ctx.ldtr.limit = 0xFFFFU;
ctx->ext_ctx.ldtr.attr = LDTR_AR;
/* tr */
ctx->ext_ctx.tr.selector = 0U;
ctx->ext_ctx.tr.base = 0UL;
ctx->ext_ctx.tr.limit = 0xFFFFU;
ctx->ext_ctx.tr.attr = TR_AR;
if(vcpu_mode == CPU_MODE_REAL) {
init_guest_context_real(vcpu);
init_guest_vmx(vcpu, CR0_ET | CR0_NE, 0, 0);
} else if (is_vm0(vcpu->vm) && is_vcpu_bsp(vcpu)) {
init_guest_context_vm0_bsp(vcpu);
init_guest_vmx(vcpu, init_ctx->cr0, init_ctx->cr3,
init_ctx->cr4 & ~CR4_VMXE);
} else {
HV_ARCH_VMX_GET_CS(sel);
access = get_cs_access_rights();
limit = 0xffffffffU;
base = 0UL;
init_guest_context_protect(vcpu);
init_guest_vmx(vcpu, CR0_ET | CR0_NE | CR0_PE, 0, 0);
}
/* Selector */
exec_vmwrite16(VMX_GUEST_CS_SEL, sel);
pr_dbg("VMX_GUEST_CS_SEL: 0x%hx ", sel);
/* Limit */
exec_vmwrite32(VMX_GUEST_CS_LIMIT, limit);
pr_dbg("VMX_GUEST_CS_LIMIT: 0x%x ", limit);
/* Access */
exec_vmwrite32(VMX_GUEST_CS_ATTR, access);
pr_dbg("VMX_GUEST_CS_ATTR: 0x%x ", access);
/* Base */
exec_vmwrite(VMX_GUEST_CS_BASE, base);
pr_dbg("VMX_GUEST_CS_BASE: 0x%016llx ", base);
/***************************************************/
/* Set up instruction pointer and stack pointer */
/***************************************************/
/* Set up guest instruction pointer */
value64 = 0UL;
if (vcpu_mode == CPU_MODE_REAL) {
/* RIP is set here */
if (is_vcpu_bsp(vcpu)) {
if ((uint64_t)vcpu->entry_addr < 0x100000UL) {
value64 = (uint64_t)vcpu->entry_addr & 0x0FUL;
}
else {
value64 = 0x0000FFF0UL;
}
}
} else {
value64 = (uint64_t)vcpu->entry_addr;
}
pr_dbg("GUEST RIP on VMEntry %016llx ", value64);
exec_vmwrite(VMX_GUEST_RIP, value64);
if (vcpu_mode == CPU_MODE_64BIT) {
/* Set up guest stack pointer to 0 */
value64 = 0UL;
pr_dbg("GUEST RSP on VMEntry %016llx ",
value64);
exec_vmwrite(VMX_GUEST_RSP, value64);
}
/***************************************************/
/* Set up GDTR, IDTR and LDTR */
/***************************************************/
/* GDTR - Global Descriptor Table */
if (vcpu_mode == CPU_MODE_REAL) {
/* Base */
base = 0UL;
/* Limit */
limit = 0xFFFFU;
} else if (vcpu_mode == CPU_MODE_PROTECTED) {
base = create_guest_init_gdt(vcpu->vm, &limit);
} else if (vcpu_mode == CPU_MODE_64BIT) {
descriptor_table gdtb = {0U, 0UL};
/* Base *//* TODO: Should guest GDTB point to host GDTB ? */
/* Obtain the current global descriptor table base */
asm volatile ("sgdt %0" : "=m"(gdtb)::"memory");
value32 = gdtb.limit;
if (((gdtb.base >> 47U) & 0x1UL) != 0UL) {
gdtb.base |= 0xffff000000000000UL;
}
base = gdtb.base;
/* Limit */
limit = HOST_GDT_SIZE - 1U;
} else {
/* vcpu_mode will never be CPU_MODE_COMPATIBILITY */
}
/* GDTR Base */
exec_vmwrite(VMX_GUEST_GDTR_BASE, base);
pr_dbg("VMX_GUEST_GDTR_BASE: 0x%016llx ", base);
/* GDTR Limit */
exec_vmwrite32(VMX_GUEST_GDTR_LIMIT, limit);
pr_dbg("VMX_GUEST_GDTR_LIMIT: 0x%x ", limit);
/* IDTR - Interrupt Descriptor Table */
if ((vcpu_mode == CPU_MODE_REAL) ||
(vcpu_mode == CPU_MODE_PROTECTED)) {
/* Base */
base = 0UL;
/* Limit */
limit = 0xFFFFU;
} else if (vcpu_mode == CPU_MODE_64BIT) {
descriptor_table idtb = {0U, 0UL};
/* TODO: Should guest IDTR point to host IDTR ? */
asm volatile ("sidt %0":"=m"(idtb)::"memory");
/* Limit */
limit = idtb.limit;
if (((idtb.base >> 47U) & 0x1UL) != 0UL) {
idtb.base |= 0xffff000000000000UL;
}
/* Base */
base = idtb.base;
} else {
/* vcpu_mode will never be CPU_MODE_COMPATIBILITY */
}
/* IDTR Base */
exec_vmwrite(VMX_GUEST_IDTR_BASE, base);
pr_dbg("VMX_GUEST_IDTR_BASE: 0x%016llx ", base);
/* IDTR Limit */
exec_vmwrite32(VMX_GUEST_IDTR_LIMIT, limit);
pr_dbg("VMX_GUEST_IDTR_LIMIT: 0x%x ", limit);
/***************************************************/
/* Debug register */
/***************************************************/
/* Set up guest Debug register */
value64 = 0x400UL;
exec_vmwrite(VMX_GUEST_DR7, value64);
pr_dbg("VMX_GUEST_DR7: 0x%016llx ", value64);
/***************************************************/
/* ES, CS, SS, DS, FS, GS */
/***************************************************/
data32_idx = 0x10U;
if (vcpu_mode == CPU_MODE_REAL) {
es = data32_idx;
ss = data32_idx;
ds = data32_idx;
fs = data32_idx;
gs = data32_idx;
limit = 0xffffU;
} else if (vcpu_mode == CPU_MODE_PROTECTED) {
/* Linear data segment in guest init gdt */
es = 0x18U;
ss = 0x18U;
ds = 0x18U;
fs = 0x18U;
gs = 0x18U;
limit = 0xffffffffU;
} else if (vcpu_mode == CPU_MODE_64BIT) {
asm volatile ("movw %%es, %%ax":"=a" (es));
asm volatile ("movw %%ss, %%ax":"=a" (ss));
asm volatile ("movw %%ds, %%ax":"=a" (ds));
asm volatile ("movw %%fs, %%ax":"=a" (fs));
asm volatile ("movw %%gs, %%ax":"=a" (gs));
limit = 0xffffffffU;
} else {
/* vcpu_mode will never be CPU_MODE_COMPATIBILITY */
}
/* Selector */
exec_vmwrite16(VMX_GUEST_ES_SEL, es);
pr_dbg("VMX_GUEST_ES_SEL: 0x%hx ", es);
exec_vmwrite16(VMX_GUEST_SS_SEL, ss);
pr_dbg("VMX_GUEST_SS_SEL: 0x%hx ", ss);
exec_vmwrite16(VMX_GUEST_DS_SEL, ds);
pr_dbg("VMX_GUEST_DS_SEL: 0x%hx ", ds);
exec_vmwrite16(VMX_GUEST_FS_SEL, fs);
pr_dbg("VMX_GUEST_FS_SEL: 0x%hx ", fs);
exec_vmwrite16(VMX_GUEST_GS_SEL, gs);
pr_dbg("VMX_GUEST_GS_SEL: 0x%hx ", gs);
/* Limit */
exec_vmwrite32(VMX_GUEST_ES_LIMIT, limit);
pr_dbg("VMX_GUEST_ES_LIMIT: 0x%x ", limit);
exec_vmwrite32(VMX_GUEST_SS_LIMIT, limit);
pr_dbg("VMX_GUEST_SS_LIMIT: 0x%x ", limit);
exec_vmwrite32(VMX_GUEST_DS_LIMIT, limit);
pr_dbg("VMX_GUEST_DS_LIMIT: 0x%x ", limit);
exec_vmwrite32(VMX_GUEST_FS_LIMIT, limit);
pr_dbg("VMX_GUEST_FS_LIMIT: 0x%x ", limit);
exec_vmwrite32(VMX_GUEST_GS_LIMIT, limit);
pr_dbg("VMX_GUEST_GS_LIMIT: 0x%x ", limit);
/* Access */
if (vcpu_mode == CPU_MODE_REAL) {
value32 = REAL_MODE_DATA_SEG_AR;
}
else { /* same value for protected mode and long mode */
value32 = PROTECTED_MODE_DATA_SEG_AR;
}
exec_vmwrite32(VMX_GUEST_ES_ATTR, value32);
pr_dbg("VMX_GUEST_ES_ATTR: 0x%x ", value32);
exec_vmwrite32(VMX_GUEST_SS_ATTR, value32);
pr_dbg("VMX_GUEST_SS_ATTR: 0x%x ", value32);
exec_vmwrite32(VMX_GUEST_DS_ATTR, value32);
pr_dbg("VMX_GUEST_DS_ATTR: 0x%x ", value32);
exec_vmwrite32(VMX_GUEST_FS_ATTR, value32);
pr_dbg("VMX_GUEST_FS_ATTR: 0x%x ", value32);
exec_vmwrite32(VMX_GUEST_GS_ATTR, value32);
pr_dbg("VMX_GUEST_GS_ATTR: 0x%x ", value32);
/* Base */
if (vcpu_mode == CPU_MODE_REAL) {
value64 = (uint64_t)es << 4U;
} else {
value64 = 0UL;
}
exec_vmwrite(VMX_GUEST_ES_BASE, value64);
pr_dbg("VMX_GUEST_ES_BASE: 0x%016llx ", value64);
exec_vmwrite(VMX_GUEST_SS_BASE, value64);
pr_dbg("VMX_GUEST_SS_BASE: 0x%016llx ", value64);
exec_vmwrite(VMX_GUEST_DS_BASE, value64);
pr_dbg("VMX_GUEST_DS_BASE: 0x%016llx ", value64);
exec_vmwrite(VMX_GUEST_FS_BASE, value64);
pr_dbg("VMX_GUEST_FS_BASE: 0x%016llx ", value64);
exec_vmwrite(VMX_GUEST_GS_BASE, value64);
pr_dbg("VMX_GUEST_GS_BASE: 0x%016llx ", value64);
/***************************************************/
/* LDT and TR (dummy) */
/***************************************************/
value16 = ldt_idx;
exec_vmwrite16(VMX_GUEST_LDTR_SEL, value16);
pr_dbg("VMX_GUEST_LDTR_SEL: 0x%hu ", value16);
value32 = 0xffffffffU;
exec_vmwrite32(VMX_GUEST_LDTR_LIMIT, value32);
pr_dbg("VMX_GUEST_LDTR_LIMIT: 0x%x ", value32);
value32 = 0x10000U;
exec_vmwrite32(VMX_GUEST_LDTR_ATTR, value32);
pr_dbg("VMX_GUEST_LDTR_ATTR: 0x%x ", value32);
value64 = 0x00UL;
exec_vmwrite(VMX_GUEST_LDTR_BASE, value64);
pr_dbg("VMX_GUEST_LDTR_BASE: 0x%016llx ", value64);
/* Task Register */
value16 = tr_sel;
exec_vmwrite16(VMX_GUEST_TR_SEL, value16);
pr_dbg("VMX_GUEST_TR_SEL: 0x%hu ", value16);
value32 = 0xffU;
exec_vmwrite32(VMX_GUEST_TR_LIMIT, value32);
pr_dbg("VMX_GUEST_TR_LIMIT: 0x%x ", value32);
value32 = 0x8bU;
exec_vmwrite32(VMX_GUEST_TR_ATTR, value32);
pr_dbg("VMX_GUEST_TR_ATTR: 0x%x ", value32);
value64 = 0x00UL;
exec_vmwrite(VMX_GUEST_TR_BASE, value64);
pr_dbg("VMX_GUEST_TR_BASE: 0x%016llx ", value64);
value32 = 0U;
exec_vmwrite32(VMX_GUEST_INTERRUPTIBILITY_INFO, value32);
pr_dbg("VMX_GUEST_INTERRUPTIBILITY_INFO: 0x%x ",
value32);
value32 = 0U;
exec_vmwrite32(VMX_GUEST_ACTIVITY_STATE, value32);
pr_dbg("VMX_GUEST_ACTIVITY_STATE: 0x%x ",
value32);
value32 = 0U;
exec_vmwrite32(VMX_GUEST_SMBASE, value32);
pr_dbg("VMX_GUEST_SMBASE: 0x%x ", value32);
value32 = ((uint32_t)msr_read(MSR_IA32_SYSENTER_CS) & 0xFFFFFFFFU);
exec_vmwrite32(VMX_GUEST_IA32_SYSENTER_CS, value32);
pr_dbg("VMX_GUEST_IA32_SYSENTER_CS: 0x%x ",
value32);
value64 = PAT_POWER_ON_VALUE;
exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, value64);
pr_dbg("VMX_GUEST_IA32_PAT: 0x%016llx ", value64);
vcpu_set_pat_ext(vcpu, value64);
value64 = 0UL;
exec_vmwrite64(VMX_GUEST_IA32_DEBUGCTL_FULL, value64);
pr_dbg("VMX_GUEST_IA32_DEBUGCTL: 0x%016llx ",
value64);
/* Set up guest pending debug exception */
value64 = 0x0UL;
exec_vmwrite(VMX_GUEST_PENDING_DEBUG_EXCEPT, value64);
pr_dbg("VMX_GUEST_PENDING_DEBUG_EXCEPT: 0x%016llx ", value64);
/* These fields manage host and guest system calls * pg 3069 31.10.4.2
* - set up these fields with * contents of current SYSENTER ESP and
* EIP MSR values
*/
value64 = msr_read(MSR_IA32_SYSENTER_ESP);
exec_vmwrite(VMX_GUEST_IA32_SYSENTER_ESP, value64);
pr_dbg("VMX_GUEST_IA32_SYSENTER_ESP: 0x%016llx ",
value64);
value64 = msr_read(MSR_IA32_SYSENTER_EIP);
exec_vmwrite(VMX_GUEST_IA32_SYSENTER_EIP, value64);
pr_dbg("VMX_GUEST_IA32_SYSENTER_EIP: 0x%016llx ",
value64);
}
static void init_host_state(__unused struct vcpu *vcpu)
@ -1092,11 +845,6 @@ static void init_host_state(__unused struct vcpu *vcpu)
exec_vmwrite(VMX_HOST_IDTR_BASE, idtb.base);
pr_dbg("VMX_HOST_IDTR_BASE: 0x%x ", idtb.base);
value32 = (uint32_t)msr_read(MSR_IA32_SYSENTER_CS);
exec_vmwrite32(VMX_HOST_IA32_SYSENTER_CS, value32);
pr_dbg("VMX_HOST_IA32_SYSENTER_CS: 0x%x ",
value32);
/**************************************************/
/* 64-bit fields */
pr_dbg("64-bit********");
@ -1142,17 +890,10 @@ static void init_host_state(__unused struct vcpu *vcpu)
exec_vmwrite(VMX_HOST_RIP, value64);
pr_dbg("vm exit return address = %016llx ", value64);
/* These fields manage host and guest system calls * pg 3069 31.10.4.2
* - set up these fields with * contents of current SYSENTER ESP and
* EIP MSR values
*/
value = msr_read(MSR_IA32_SYSENTER_ESP);
exec_vmwrite(VMX_HOST_IA32_SYSENTER_ESP, value);
pr_dbg("VMX_HOST_IA32_SYSENTER_ESP: 0x%016llx ",
value);
value = msr_read(MSR_IA32_SYSENTER_EIP);
exec_vmwrite(VMX_HOST_IA32_SYSENTER_EIP, value);
pr_dbg("VMX_HOST_IA32_SYSENTER_EIP: 0x%016llx ", value);
/* As a type I hypervisor, just init sysenter fields to 0 */
exec_vmwrite32(VMX_HOST_IA32_SYSENTER_CS, 0U);
exec_vmwrite(VMX_HOST_IA32_SYSENTER_ESP, 0UL);
exec_vmwrite(VMX_HOST_IA32_SYSENTER_EIP, 0UL);
}
static uint32_t check_vmx_ctrl(uint32_t msr, uint32_t ctrl_req)
@ -1467,77 +1208,6 @@ static void init_exit_ctrl(__unused struct vcpu *vcpu)
exec_vmwrite32(VMX_EXIT_MSR_LOAD_COUNT, 0U);
}
#ifdef CONFIG_EFI_STUB
static void override_uefi_vmcs(struct vcpu *vcpu)
{
if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
/* CR4 should be set before CR0, because when set CR0, CR4 value
* will be checked. */
/* VMXE is always on bit when set CR4, and not allowed to be set
* from input cr4 value */
vcpu_set_cr4(vcpu, efi_ctx->cr4 & ~CR4_VMXE);
exec_vmwrite(VMX_GUEST_CR3, efi_ctx->cr3);
vcpu_set_cr0(vcpu, efi_ctx->cr0 | CR0_PG | CR0_PE | CR0_NE);
/* Selector */
exec_vmwrite16(VMX_GUEST_CS_SEL, efi_ctx->cs_sel);
pr_dbg("VMX_GUEST_CS_SEL: 0x%hx ", efi_ctx->cs_sel);
/* Access */
exec_vmwrite32(VMX_GUEST_CS_ATTR, efi_ctx->cs_ar);
pr_dbg("VMX_GUEST_CS_ATTR: 0x%x ", efi_ctx->cs_ar);
exec_vmwrite16(VMX_GUEST_ES_SEL, efi_ctx->es_sel);
pr_dbg("VMX_GUEST_ES_SEL: 0x%hx ", efi_ctx->es_sel);
exec_vmwrite16(VMX_GUEST_SS_SEL, efi_ctx->ss_sel);
pr_dbg("VMX_GUEST_SS_SEL: 0x%hx ", efi_ctx->ss_sel);
exec_vmwrite16(VMX_GUEST_DS_SEL, efi_ctx->ds_sel);
pr_dbg("VMX_GUEST_DS_SEL: 0x%hx ", efi_ctx->ds_sel);
exec_vmwrite16(VMX_GUEST_FS_SEL, efi_ctx->fs_sel);
pr_dbg("VMX_GUEST_FS_SEL: 0x%hx ", efi_ctx->fs_sel);
exec_vmwrite16(VMX_GUEST_GS_SEL, efi_ctx->gs_sel);
pr_dbg("VMX_GUEST_GS_SEL: 0x%hx ", efi_ctx->gs_sel);
/* Base */
exec_vmwrite(VMX_GUEST_ES_BASE, efi_ctx->es_sel << 4U);
exec_vmwrite(VMX_GUEST_SS_BASE, efi_ctx->ss_sel << 4U);
exec_vmwrite(VMX_GUEST_DS_BASE, efi_ctx->ds_sel << 4U);
exec_vmwrite(VMX_GUEST_FS_BASE, efi_ctx->fs_sel << 4U);
exec_vmwrite(VMX_GUEST_GS_BASE, efi_ctx->gs_sel << 4U);
/* RSP */
exec_vmwrite(VMX_GUEST_RSP, efi_ctx->rsp);
pr_dbg("GUEST RSP on VMEntry %x ", efi_ctx->rsp);
/* GDTR Base */
exec_vmwrite(VMX_GUEST_GDTR_BASE, efi_ctx->gdt.base);
pr_dbg("VMX_GUEST_GDTR_BASE: 0x%016llx ", efi_ctx->gdt.base);
/* GDTR Limit */
exec_vmwrite32(VMX_GUEST_GDTR_LIMIT, efi_ctx->gdt.limit);
pr_dbg("VMX_GUEST_GDTR_LIMIT: 0x%x ", efi_ctx->gdt.limit);
/* IDTR Base */
exec_vmwrite(VMX_GUEST_IDTR_BASE, efi_ctx->idt.base);
pr_dbg("VMX_GUEST_IDTR_BASE: 0x%016llx ", efi_ctx->idt.base);
/* IDTR Limit */
exec_vmwrite32(VMX_GUEST_IDTR_LIMIT, efi_ctx->idt.limit);
pr_dbg("VMX_GUEST_IDTR_LIMIT: 0x%x ", efi_ctx->idt.limit);
}
/* Interrupt */
/* clear flags for CF/PF/AF/ZF/SF/OF */
vcpu_set_rflags(vcpu, efi_ctx->rflags & ~(0x8d5UL));
pr_dbg("VMX_GUEST_RFLAGS: 0x%016llx ", vcpu_get_rflags(vcpu));
}
#endif
int init_vmcs(struct vcpu *vcpu)
{
uint64_t vmx_rev_id;
@ -1573,11 +1243,6 @@ int init_vmcs(struct vcpu *vcpu)
init_entry_ctrl(vcpu);
init_exit_ctrl(vcpu);
#ifdef CONFIG_EFI_STUB
if (is_vm0(vcpu->vm) && vcpu->pcpu_id == BOOT_CPU_ID) {
override_uefi_vmcs(vcpu);
}
#endif
/* Return status to caller */
return status;
}

View File

@ -29,9 +29,7 @@
#include <hypervisor.h>
#include "acpi.h"
#ifdef CONFIG_EFI_STUB
#include <vm0_boot.h>
#endif
#define ACPI_SIG_RSDP "RSD PTR " /* Root System Description Ptr */
#define ACPI_OEM_ID_SIZE 6

View File

@ -6,9 +6,7 @@
#include <hypervisor.h>
#include <reloc.h>
#ifdef CONFIG_EFI_STUB
#include <vm0_boot.h>
#endif
struct Elf64_Dyn {
uint64_t d_tag;

View File

@ -45,7 +45,7 @@ extern const uint64_t guest_entry;
static UINT64 hv_hpa;
static inline void hv_jump(EFI_PHYSICAL_ADDRESS hv_start,
struct multiboot_info *mbi, struct efi_ctx *efi_ctx)
struct multiboot_info *mbi, struct boot_ctx *efi_ctx)
{
hv_func hf;
@ -67,7 +67,7 @@ static inline void hv_jump(EFI_PHYSICAL_ADDRESS hv_start,
}
EFI_STATUS
construct_mbi(struct multiboot_info **mbi_ret, struct efi_ctx *efi_ctx)
construct_mbi(struct multiboot_info **mbi_ret, struct boot_ctx *efi_ctx)
{
UINTN map_size, _map_size, map_key;
UINT32 desc_version;
@ -212,16 +212,16 @@ switch_to_guest_mode(EFI_HANDLE image)
EFI_PHYSICAL_ADDRESS addr;
EFI_STATUS err;
struct multiboot_info *mbi = NULL;
struct efi_ctx *efi_ctx;
struct boot_ctx *efi_ctx;
struct acpi_table_rsdp *rsdp = NULL;
int i;
EFI_CONFIGURATION_TABLE *config_table;
err = emalloc(sizeof(struct efi_ctx), 8, &addr);
err = emalloc(sizeof(struct boot_ctx), 8, &addr);
if (err != EFI_SUCCESS)
goto out;
efi_ctx = (struct efi_ctx *)(UINTN)addr;
efi_ctx = (struct boot_ctx *)(UINTN)addr;
/* reserve secondary memory region for hv */
err = emalloc_for_low_mem(&addr, CONFIG_LOW_RAM_SIZE);
@ -261,53 +261,26 @@ switch_to_guest_mode(EFI_HANDLE image)
if (err != EFI_SUCCESS)
goto out;
asm volatile ("mov %%cr0, %0" : "=r"(efi_ctx->cr0));
asm volatile ("mov %%cr3, %0" : "=r"(efi_ctx->cr3));
asm volatile ("mov %%cr4, %0" : "=r"(efi_ctx->cr4));
asm volatile ("sidt %0" :: "m" (efi_ctx->idt));
asm volatile ("sgdt %0" :: "m" (efi_ctx->gdt));
asm volatile ("str %0" :: "m" (efi_ctx->tr_sel));
asm volatile ("sldt %0" :: "m" (efi_ctx->ldt_sel));
asm volatile ("mov %%cs, %%ax" : "=a"(efi_ctx->cs_sel));
asm volatile ("lar %%eax, %%eax"
: "=a"(efi_ctx->cs_ar)
: "a"(efi_ctx->cs_sel)
);
efi_ctx->cs_ar = (efi_ctx->cs_ar >> 8) & 0xf0ff; /* clear bits 11:8 */
asm volatile ("mov %%es, %%ax" : "=a"(efi_ctx->es_sel));
asm volatile ("mov %%ss, %%ax" : "=a"(efi_ctx->ss_sel));
asm volatile ("mov %%ds, %%ax" : "=a"(efi_ctx->ds_sel));
asm volatile ("mov %%fs, %%ax" : "=a"(efi_ctx->fs_sel));
asm volatile ("mov %%gs, %%ax" : "=a"(efi_ctx->gs_sel));
uint32_t idx = 0xC0000080; /* MSR_IA32_EFER */
uint32_t msrl, msrh;
asm volatile ("rdmsr" : "=a"(msrl), "=d"(msrh) : "c"(idx));
efi_ctx->efer = ((uint64_t)msrh<<32) | msrl;
asm volatile ("pushf\n\t"
"pop %0\n\t"
: "=r"(efi_ctx->rflags)
: );
asm volatile ("movq %%rax, %0" : "=r"(efi_ctx->rax));
asm volatile ("movq %%rbx, %0" : "=r"(efi_ctx->rbx));
asm volatile ("movq %%rcx, %0" : "=r"(efi_ctx->rcx));
asm volatile ("movq %%rdx, %0" : "=r"(efi_ctx->rdx));
asm volatile ("movq %%rdi, %0" : "=r"(efi_ctx->rdi));
asm volatile ("movq %%rsi, %0" : "=r"(efi_ctx->rsi));
asm volatile ("movq %%rsp, %0" : "=r"(efi_ctx->rsp));
asm volatile ("movq %%rbp, %0" : "=r"(efi_ctx->rbp));
asm volatile ("movq %%r8, %0" : "=r"(efi_ctx->r8));
asm volatile ("movq %%r9, %0" : "=r"(efi_ctx->r9));
asm volatile ("movq %%r10, %0" : "=r"(efi_ctx->r10));
asm volatile ("movq %%r11, %0" : "=r"(efi_ctx->r11));
asm volatile ("movq %%r12, %0" : "=r"(efi_ctx->r12));
asm volatile ("movq %%r13, %0" : "=r"(efi_ctx->r13));
asm volatile ("movq %%r14, %0" : "=r"(efi_ctx->r14));
asm volatile ("movq %%r15, %0" : "=r"(efi_ctx->r15));
"pop %0\n\t"
: "=r"(efi_ctx->rflags)
: );
asm volatile ("movq %%rax, %0" : "=r"(efi_ctx->gprs.rax));
asm volatile ("movq %%rbx, %0" : "=r"(efi_ctx->gprs.rbx));
asm volatile ("movq %%rcx, %0" : "=r"(efi_ctx->gprs.rcx));
asm volatile ("movq %%rdx, %0" : "=r"(efi_ctx->gprs.rdx));
asm volatile ("movq %%rdi, %0" : "=r"(efi_ctx->gprs.rdi));
asm volatile ("movq %%rsi, %0" : "=r"(efi_ctx->gprs.rsi));
asm volatile ("movq %%rsp, %0" : "=r"(efi_ctx->gprs.rsp));
asm volatile ("movq %%rbp, %0" : "=r"(efi_ctx->gprs.rbp));
asm volatile ("movq %%r8, %0" : "=r"(efi_ctx->gprs.r8));
asm volatile ("movq %%r9, %0" : "=r"(efi_ctx->gprs.r9));
asm volatile ("movq %%r10, %0" : "=r"(efi_ctx->gprs.r10));
asm volatile ("movq %%r11, %0" : "=r"(efi_ctx->gprs.r11));
asm volatile ("movq %%r12, %0" : "=r"(efi_ctx->gprs.r12));
asm volatile ("movq %%r13, %0" : "=r"(efi_ctx->gprs.r13));
asm volatile ("movq %%r14, %0" : "=r"(efi_ctx->gprs.r14));
asm volatile ("movq %%r15, %0" : "=r"(efi_ctx->gprs.r15));
hv_jump(hv_hpa, mbi, efi_ctx);
asm volatile (".global guest_entry\n\t"

View File

@ -6,9 +6,7 @@
#include <hypervisor.h>
#include <multiboot.h>
#ifdef CONFIG_EFI_STUB
#include <vm0_boot.h>
#endif
/* IOAPIC id */
#define UEFI_IOAPIC_ID 8
@ -26,7 +24,7 @@
#ifdef CONFIG_EFI_STUB
static void efi_init(void);
struct efi_ctx* efi_ctx = NULL;
struct boot_ctx* efi_ctx = NULL;
struct lapic_regs uefi_lapic_regs;
static int efi_initialized;
@ -54,6 +52,8 @@ void efi_spurious_handler(int vector)
int uefi_sw_loader(struct vm *vm, struct vcpu *vcpu)
{
int ret = 0;
struct run_context *cur_context =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
ASSERT(vm != NULL, "Incorrect argument");
@ -65,21 +65,8 @@ int uefi_sw_loader(struct vm *vm, struct vcpu *vcpu)
vlapic_restore(vcpu->arch_vcpu.vlapic, &uefi_lapic_regs);
vcpu->entry_addr = (void *)efi_ctx->rip;
vcpu_set_gpreg(vcpu, CPU_REG_RAX, efi_ctx->rax);
vcpu_set_gpreg(vcpu, CPU_REG_RBX, efi_ctx->rbx);
vcpu_set_gpreg(vcpu, CPU_REG_RCX, efi_ctx->rcx);
vcpu_set_gpreg(vcpu, CPU_REG_RDX, efi_ctx->rdx);
vcpu_set_gpreg(vcpu, CPU_REG_RDI, efi_ctx->rdi);
vcpu_set_gpreg(vcpu, CPU_REG_RSI, efi_ctx->rsi);
vcpu_set_gpreg(vcpu, CPU_REG_RBP, efi_ctx->rbp);
vcpu_set_gpreg(vcpu, CPU_REG_R8, efi_ctx->r8);
vcpu_set_gpreg(vcpu, CPU_REG_R9, efi_ctx->r9);
vcpu_set_gpreg(vcpu, CPU_REG_R10, efi_ctx->r10);
vcpu_set_gpreg(vcpu, CPU_REG_R11, efi_ctx->r11);
vcpu_set_gpreg(vcpu, CPU_REG_R12, efi_ctx->r12);
vcpu_set_gpreg(vcpu, CPU_REG_R13, efi_ctx->r13);
vcpu_set_gpreg(vcpu, CPU_REG_R14, efi_ctx->r14);
vcpu_set_gpreg(vcpu, CPU_REG_R15, efi_ctx->r15);
memcpy_s(&cur_context->guest_cpu_regs, sizeof(struct cpu_gp_regs),
&efi_ctx->gprs, sizeof(struct cpu_gp_regs));
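/* The bulk copy replaces the old per-register vcpu_set_gpreg() calls;
 * it assumes boot_ctx.gprs uses the same struct cpu_gp_regs layout as
 * the guest run_context. */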
/* defer irq enabling till vlapic is ready */
CPU_IRQ_ENABLE();
@ -112,7 +99,7 @@ static void efi_init(void)
if (!(mbi->mi_flags & MULTIBOOT_INFO_HAS_DRIVES))
ASSERT(0, "no multiboot drivers for uefi found");
efi_ctx = (struct efi_ctx *)HPA2HVA((uint64_t)mbi->mi_drives_addr);
efi_ctx = (struct boot_ctx *)HPA2HVA((uint64_t)mbi->mi_drives_addr);
ASSERT(efi_ctx != NULL, "no uefi context found");
vm_sw_loader = uefi_sw_loader;

View File

@ -134,6 +134,7 @@ int copy_to_gva(struct vcpu *vcpu, void *h_ptr, uint64_t gva,
uint32_t size, uint32_t *err_code, uint64_t *fault_addr);
uint64_t create_guest_init_gdt(struct vm *vm, uint32_t *limit);
extern uint8_t vm0_boot_context[];
#ifdef HV_DEBUG
void get_req_info(char *str_arg, int str_max);

View File

@ -4,53 +4,48 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef UEFI_H
#define UEFI_H
#ifndef VM0_BOOT_H
#define VM0_BOOT_H
#include <gpr.h>
typedef struct {
uint16_t limit;
uint64_t *base;
uint64_t base;
} __attribute__((packed)) dt_addr_t;
struct efi_ctx {
uint64_t rip;
void *rsdp;
void *ap_trampoline_buf;
dt_addr_t gdt;
dt_addr_t idt;
uint16_t tr_sel;
uint16_t ldt_sel;
struct boot_ctx {
uint64_t cr0;
uint64_t cr3;
uint64_t cr4;
uint64_t rflags;
dt_addr_t idt;
dt_addr_t gdt;
uint16_t ldt_sel;
uint16_t tr_sel;
/* align the order to ext_context */
uint16_t cs_sel;
uint32_t cs_ar;
uint16_t es_sel;
uint16_t ss_sel;
uint16_t ds_sel;
uint16_t es_sel;
uint16_t fs_sel;
uint16_t gs_sel;
uint64_t efer;
uint64_t rax;
uint64_t rbx;
uint64_t rcx;
uint64_t rdx;
uint64_t rdi;
uint64_t rsi;
uint64_t rsp;
uint64_t rbp;
uint64_t r8;
uint64_t r9;
uint64_t r10;
uint64_t r11;
uint64_t r12;
uint64_t r13;
uint64_t r14;
uint64_t r15;
uint32_t cs_ar;
uint64_t ia32_efer;
#ifdef CONFIG_EFI_STUB
struct cpu_gp_regs gprs;
uint64_t rip;
uint64_t rflags;
void *rsdp;
void *ap_trampoline_buf;
#endif
}__attribute__((packed));
#ifdef CONFIG_EFI_STUB
void *get_rsdp_from_uefi(void);
void *get_ap_trampoline_buf(void);
#endif
#endif /* UEFI_H*/
#endif /* VM0_BOOT_H */