mirror of https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-06-19 20:22:46 +00:00
hv: move down structures run_context and ext_context
The structures (run_context & ext_context) are currently defined in vcpu.h, yet they are used by lower-layer modules (wakeup.S). This patch moves the structures down from vcpu.h to cpu.h to avoid the reversed dependency.

Tracked-On: #1842
Signed-off-by: Mingqiang Chi <mingqiang.chi@intel.com>
Reviewed-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
parent 4f98cb03a7
commit 60adef33d3
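The hunks below are the authoritative change; as a condensed, illustrative sketch of the resulting layering (member lists abbreviated, the stand-in types at the top are not part of the real headers, and the vcpu.h path is assumed), cpu.h now owns the context layout that assembly and host-side code need, while vcpu.h keeps only a renamed guest-facing wrapper:

#include <stdint.h>

/* Stand-ins so this sketch compiles on its own; in the tree these come from
 * acrn_common.h, which cpu.h now pulls in via the new #include <acrn_common.h>. */
#define NUM_GPRS 16U
struct acrn_gp_regs {
	uint64_t rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi;
	uint64_t r8, r9, r10, r11, r12, r13, r14, r15;
};

/* --- include/arch/x86/cpu.h: low-level layout, reachable from wakeup.S --- */
struct run_context {
	union cpu_regs_t {              /* union renamed from guest_cpu_regs_t */
		struct acrn_gp_regs regs;
		uint64_t longs[NUM_GPRS];
	} cpu_regs;                     /* field renamed from guest_cpu_regs */
	uint64_t cr0, cr2, cr4, rip, rflags;
	uint64_t ia32_spec_ctrl, ia32_efer;
};

struct ext_context {                    /* only handled on trusty world switch */
	uint64_t cr3;
	/* segment registers, MSRs, dr7, tsc_offset, FXSAVE area elided here */
};

struct cpu_context {                    /* usable without vcpu.h, e.g. by the
					 * host code declaring cpu_ctx below */
	struct run_context run_ctx;
	struct ext_context ext_ctx;
};

/* --- vcpu.h (guest layer): keeps only the renamed wrapper ---------------- */
struct guest_cpu_context {
	struct run_context run_ctx;
	struct ext_context ext_ctx;
	/* remaining members unchanged and not shown */
};

The field rename (guest_cpu_regs -> cpu_regs) is what drives the mechanical edits in the trusty and vcpu code below; the struct rename (cpu_context -> guest_cpu_context) frees the cpu_context name for the arch layer.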
@@ -796,6 +796,7 @@ INPUT = custom-doxygen/mainpage.md \
 ../hypervisor/include/arch/x86/guest/guest_memory.h \
 ../hypervisor/include/arch/x86/guest/ept.h \
 ../hypervisor/include/arch/x86/mmu.h \
+../hypervisor/include/arch/x86/cpu.h \
 ../hypervisor/include/arch/x86/pgtable.h \
 ../hypervisor/include/arch/x86/vtd.h \
 ../hypervisor/include/arch/x86/irq.h \
@@ -268,10 +268,10 @@ static void load_world_ctx(struct acrn_vcpu *vcpu, const struct ext_context *ext
 static void copy_smc_param(const struct run_context *prev_ctx,
 		struct run_context *next_ctx)
 {
-	next_ctx->guest_cpu_regs.regs.rdi = prev_ctx->guest_cpu_regs.regs.rdi;
-	next_ctx->guest_cpu_regs.regs.rsi = prev_ctx->guest_cpu_regs.regs.rsi;
-	next_ctx->guest_cpu_regs.regs.rdx = prev_ctx->guest_cpu_regs.regs.rdx;
-	next_ctx->guest_cpu_regs.regs.rbx = prev_ctx->guest_cpu_regs.regs.rbx;
+	next_ctx->cpu_regs.regs.rdi = prev_ctx->cpu_regs.regs.rdi;
+	next_ctx->cpu_regs.regs.rsi = prev_ctx->cpu_regs.regs.rsi;
+	next_ctx->cpu_regs.regs.rdx = prev_ctx->cpu_regs.regs.rdx;
+	next_ctx->cpu_regs.regs.rbx = prev_ctx->cpu_regs.regs.rbx;
 }

 void switch_world(struct acrn_vcpu *vcpu, int32_t next_world)
@@ -344,7 +344,7 @@ static bool setup_trusty_info(struct acrn_vcpu *vcpu, uint32_t mem_size, uint64_
	 * address(GPA) of startup_param on boot. Currently, the startup_param
	 * is put in the first page of trusty memory just followed by key_info.
	 */
-	vcpu->arch.contexts[SECURE_WORLD].run_ctx.guest_cpu_regs.regs.rdi
+	vcpu->arch.contexts[SECURE_WORLD].run_ctx.cpu_regs.regs.rdi
		= (uint64_t)TRUSTY_EPT_REBASE_GPA + sizeof(struct trusty_key_info);

 	stac();
@@ -378,7 +378,7 @@ static bool init_secure_world_env(struct acrn_vcpu *vcpu,

 	vcpu->arch.inst_len = 0U;
 	vcpu->arch.contexts[SECURE_WORLD].run_ctx.rip = entry_gpa;
-	vcpu->arch.contexts[SECURE_WORLD].run_ctx.guest_cpu_regs.regs.rsp =
+	vcpu->arch.contexts[SECURE_WORLD].run_ctx.cpu_regs.regs.rsp =
 			TRUSTY_EPT_REBASE_GPA + size;

 	vcpu->arch.contexts[SECURE_WORLD].ext_ctx.tsc_offset = 0UL;
@@ -453,10 +453,8 @@ bool initialize_trusty(struct acrn_vcpu *vcpu, struct trusty_boot_param *boot_pa

 void save_sworld_context(struct acrn_vcpu *vcpu)
 {
-	(void)memcpy_s(&vcpu->vm->sworld_snapshot,
-			sizeof(struct cpu_context),
-			&vcpu->arch.contexts[SECURE_WORLD],
-			sizeof(struct cpu_context));
+	(void)memcpy_s((void *)&vcpu->vm->sworld_snapshot, sizeof(struct guest_cpu_context),
+		(void *)&vcpu->arch.contexts[SECURE_WORLD], sizeof(struct guest_cpu_context));
 }

 void restore_sworld_context(struct acrn_vcpu *vcpu)
@@ -469,10 +467,8 @@ void restore_sworld_context(struct acrn_vcpu *vcpu)
 			sworld_ctl->sworld_memory.length,
 			TRUSTY_EPT_REBASE_GPA);

-	(void)memcpy_s(&vcpu->arch.contexts[SECURE_WORLD],
-			sizeof(struct cpu_context),
-			&vcpu->vm->sworld_snapshot,
-			sizeof(struct cpu_context));
+	(void)memcpy_s((void *)&vcpu->arch.contexts[SECURE_WORLD], sizeof(struct guest_cpu_context),
+		(void *)&vcpu->vm->sworld_snapshot, sizeof(struct guest_cpu_context));
 }

 /**
@@ -38,7 +38,7 @@ uint64_t vcpu_get_gpreg(const struct acrn_vcpu *vcpu, uint32_t reg)
 	const struct run_context *ctx =
 		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

-	return ctx->guest_cpu_regs.longs[reg];
+	return ctx->cpu_regs.longs[reg];
 }

 void vcpu_set_gpreg(struct acrn_vcpu *vcpu, uint32_t reg, uint64_t val)
@@ -46,7 +46,7 @@ void vcpu_set_gpreg(struct acrn_vcpu *vcpu, uint32_t reg, uint64_t val)
 	struct run_context *ctx =
 		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

-	ctx->guest_cpu_regs.longs[reg] = val;
+	ctx->cpu_regs.longs[reg] = val;
 }

 uint64_t vcpu_get_rip(struct acrn_vcpu *vcpu)
@@ -72,7 +72,7 @@ uint64_t vcpu_get_rsp(const struct acrn_vcpu *vcpu)
 	const struct run_context *ctx =
 		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

-	return ctx->guest_cpu_regs.regs.rsp;
+	return ctx->cpu_regs.regs.rsp;
 }

 void vcpu_set_rsp(struct acrn_vcpu *vcpu, uint64_t val)
@@ -80,7 +80,7 @@ void vcpu_set_rsp(struct acrn_vcpu *vcpu, uint64_t val)
 	struct run_context *ctx =
 		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

-	ctx->guest_cpu_regs.regs.rsp = val;
+	ctx->cpu_regs.regs.rsp = val;
 	bitmap_set_lock(CPU_REG_RSP, &vcpu->reg_updated);
 }

@@ -274,7 +274,7 @@ void set_vcpu_regs(struct acrn_vcpu *vcpu, struct acrn_vcpu_regs *vcpu_regs)
 	ectx->ldtr.attr = LDTR_AR;
 	ectx->tr.attr = TR_AR;

-	(void)memcpy_s((void *)&(ctx->guest_cpu_regs), sizeof(struct acrn_gp_regs),
+	(void)memcpy_s((void *)&(ctx->cpu_regs), sizeof(struct acrn_gp_regs),
 		(void *)&(vcpu_regs->gprs), sizeof(struct acrn_gp_regs));

 	vcpu_set_rip(vcpu, vcpu_regs->rip);
@@ -478,7 +478,7 @@ int32_t run_vcpu(struct acrn_vcpu *vcpu)
 		exec_vmwrite(VMX_GUEST_RIP, ctx->rip);
 	}
 	if (bitmap_test_and_clear_lock(CPU_REG_RSP, &vcpu->reg_updated)) {
-		exec_vmwrite(VMX_GUEST_RSP, ctx->guest_cpu_regs.regs.rsp);
+		exec_vmwrite(VMX_GUEST_RSP, ctx->cpu_regs.regs.rsp);
 	}
 	if (bitmap_test_and_clear_lock(CPU_REG_EFER, &vcpu->reg_updated)) {
 		exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, ctx->ia32_efer);
@@ -573,7 +573,7 @@ int32_t run_vcpu(struct acrn_vcpu *vcpu)
 	/* Obtain current VCPU instruction length */
 	vcpu->arch.inst_len = exec_vmread32(VMX_EXIT_INSTR_LEN);

-	ctx->guest_cpu_regs.regs.rsp = exec_vmread(VMX_GUEST_RSP);
+	ctx->cpu_regs.regs.rsp = exec_vmread(VMX_GUEST_RSP);

 	/* Obtain VM exit reason */
 	vcpu->arch.exit_reason = exec_vmread32(VMX_EXIT_REASON);
@@ -24,8 +24,7 @@
 static void init_guest_vmx(struct acrn_vcpu *vcpu, uint64_t cr0, uint64_t cr3,
 		uint64_t cr4)
 {
-	struct cpu_context *ctx =
-		&vcpu->arch.contexts[vcpu->arch.cur_context];
+	struct guest_cpu_context *ctx = &vcpu->arch.contexts[vcpu->arch.cur_context];
 	struct ext_context *ectx = &ctx->ext_ctx;

 	vcpu_set_cr4(vcpu, cr4);
@@ -71,8 +70,7 @@ static void init_guest_vmx(struct acrn_vcpu *vcpu, uint64_t cr0, uint64_t cr3,

 static void init_guest_state(struct acrn_vcpu *vcpu)
 {
-	struct cpu_context *ctx =
-		&vcpu->arch.contexts[vcpu->arch.cur_context];
+	struct guest_cpu_context *ctx = &vcpu->arch.contexts[vcpu->arch.cur_context];

 	init_guest_vmx(vcpu, ctx->run_ctx.cr0, ctx->ext_ctx.cr3,
 			ctx->run_ctx.cr4 & ~(CR4_VMXE | CR4_SMXE | CR4_MCE));
@@ -8,6 +8,7 @@
 #include <platform_acpi_info.h>
 #include <per_cpu.h>
 #include <io.h>
+#include <msr.h>
 #include <pgtable.h>
 #include <host_pm.h>
 #include <trampoline.h>
@@ -16,7 +17,6 @@
 #include <ioapic.h>
 #include <vtd.h>
 #include <lapic.h>
-#include <vcpu.h>

 struct cpu_context cpu_ctx;

@@ -38,6 +38,7 @@
 #ifndef CPU_H
 #define CPU_H
 #include <types.h>
+#include <acrn_common.h>

 /* Define CPU stack alignment */
 #define CPU_STACK_ALIGN         16UL
@@ -147,6 +148,41 @@
 /* Boot CPU ID */
 #define BOOT_CPU_ID             0U

+/* Number of GPRs saved / restored for guest in VCPU structure */
+#define NUM_GPRS                            16U
+#define GUEST_STATE_AREA_SIZE               512
+
+#define CPU_CONTEXT_OFFSET_RAX              0U
+#define CPU_CONTEXT_OFFSET_RCX              8U
+#define CPU_CONTEXT_OFFSET_RDX              16U
+#define CPU_CONTEXT_OFFSET_RBX              24U
+#define CPU_CONTEXT_OFFSET_RSP              32U
+#define CPU_CONTEXT_OFFSET_RBP              40U
+#define CPU_CONTEXT_OFFSET_RSI              48U
+#define CPU_CONTEXT_OFFSET_RDI              56U
+#define CPU_CONTEXT_OFFSET_R8               64U
+#define CPU_CONTEXT_OFFSET_R9               72U
+#define CPU_CONTEXT_OFFSET_R10              80U
+#define CPU_CONTEXT_OFFSET_R11              88U
+#define CPU_CONTEXT_OFFSET_R12              96U
+#define CPU_CONTEXT_OFFSET_R13              104U
+#define CPU_CONTEXT_OFFSET_R14              112U
+#define CPU_CONTEXT_OFFSET_R15              120U
+#define CPU_CONTEXT_OFFSET_CR0              128U
+#define CPU_CONTEXT_OFFSET_CR2              136U
+#define CPU_CONTEXT_OFFSET_CR4              144U
+#define CPU_CONTEXT_OFFSET_RIP              152U
+#define CPU_CONTEXT_OFFSET_RFLAGS           160U
+#define CPU_CONTEXT_OFFSET_IA32_SPEC_CTRL   168U
+#define CPU_CONTEXT_OFFSET_IA32_EFER        176U
+#define CPU_CONTEXT_OFFSET_EXTCTX_START     184U
+#define CPU_CONTEXT_OFFSET_CR3              184U
+#define CPU_CONTEXT_OFFSET_IDTR             192U
+#define CPU_CONTEXT_OFFSET_LDTR             216U
+
+/*sizes of various registers within the VCPU data structure */
+#define VMX_CPU_S_FXSAVE_GUEST_AREA_SIZE    GUEST_STATE_AREA_SIZE
+
 #ifndef ASSEMBLER

 #define AP_MASK                 (((1UL << get_pcpu_nums()) - 1UL) & ~(1UL << 0U))
@@ -269,6 +305,89 @@ enum pcpu_boot_state {
 void make_pcpu_offline(uint16_t pcpu_id);
 bool need_offline(uint16_t pcpu_id);

+struct segment_sel {
+	uint16_t selector;
+	uint64_t base;
+	uint32_t limit;
+	uint32_t attr;
+};
+
+/**
+ * @brief registers info saved for vcpu running context
+ */
+struct run_context {
+/* Contains the guest register set.
+ * NOTE: This must be the first element in the structure, so that the offsets
+ * in vmx_asm.S match
+ */
+	union cpu_regs_t {
+		struct acrn_gp_regs regs;
+		uint64_t longs[NUM_GPRS];
+	} cpu_regs;
+
+	/** The guests CR registers 0, 2, 3 and 4. */
+	uint64_t cr0;
+
+	/* CPU_CONTEXT_OFFSET_CR2 =
+	 * offsetof(struct run_context, cr2) = 136
+	 */
+	uint64_t cr2;
+	uint64_t cr4;
+
+	uint64_t rip;
+	uint64_t rflags;
+
+	/* CPU_CONTEXT_OFFSET_IA32_SPEC_CTRL =
+	 * offsetof(struct run_context, ia32_spec_ctrl) = 168
+	 */
+	uint64_t ia32_spec_ctrl;
+	uint64_t ia32_efer;
+};
+
+/*
+ * extended context does not save/restore during vm exit/entry, it's mainly
+ * used in trusty world switch
+ */
+struct ext_context {
+	uint64_t cr3;
+
+	/* segment registers */
+	struct segment_sel idtr;
+	struct segment_sel ldtr;
+	struct segment_sel gdtr;
+	struct segment_sel tr;
+	struct segment_sel cs;
+	struct segment_sel ss;
+	struct segment_sel ds;
+	struct segment_sel es;
+	struct segment_sel fs;
+	struct segment_sel gs;
+
+	uint64_t ia32_star;
+	uint64_t ia32_lstar;
+	uint64_t ia32_fmask;
+	uint64_t ia32_kernel_gs_base;
+
+	uint64_t ia32_pat;
+	uint32_t ia32_sysenter_cs;
+	uint64_t ia32_sysenter_esp;
+	uint64_t ia32_sysenter_eip;
+	uint64_t ia32_debugctl;
+
+	uint64_t dr7;
+	uint64_t tsc_offset;
+
+	/* The 512 bytes area to save the FPU/MMX/SSE states for the guest */
+	uint64_t
+	fxstore_guest_area[VMX_CPU_S_FXSAVE_GUEST_AREA_SIZE / sizeof(uint64_t)]
+	__aligned(16);
+};
+
+struct cpu_context {
+	struct run_context run_ctx;
+	struct ext_context ext_ctx;
+};
+
 /* Function prototypes */
 void cpu_do_idle(void);
 void cpu_dead(void);
@@ -13,40 +13,6 @@
 #ifndef VCPU_H
 #define VCPU_H

-/* Number of GPRs saved / restored for guest in VCPU structure */
-#define NUM_GPRS                            16U
-#define GUEST_STATE_AREA_SIZE               512
-
-#define CPU_CONTEXT_OFFSET_RAX              0U
-#define CPU_CONTEXT_OFFSET_RCX              8U
-#define CPU_CONTEXT_OFFSET_RDX              16U
-#define CPU_CONTEXT_OFFSET_RBX              24U
-#define CPU_CONTEXT_OFFSET_RSP              32U
-#define CPU_CONTEXT_OFFSET_RBP              40U
-#define CPU_CONTEXT_OFFSET_RSI              48U
-#define CPU_CONTEXT_OFFSET_RDI              56U
-#define CPU_CONTEXT_OFFSET_R8               64U
-#define CPU_CONTEXT_OFFSET_R9               72U
-#define CPU_CONTEXT_OFFSET_R10              80U
-#define CPU_CONTEXT_OFFSET_R11              88U
-#define CPU_CONTEXT_OFFSET_R12              96U
-#define CPU_CONTEXT_OFFSET_R13              104U
-#define CPU_CONTEXT_OFFSET_R14              112U
-#define CPU_CONTEXT_OFFSET_R15              120U
-#define CPU_CONTEXT_OFFSET_CR0              128U
-#define CPU_CONTEXT_OFFSET_CR2              136U
-#define CPU_CONTEXT_OFFSET_CR4              144U
-#define CPU_CONTEXT_OFFSET_RIP              152U
-#define CPU_CONTEXT_OFFSET_RFLAGS           160U
-#define CPU_CONTEXT_OFFSET_IA32_SPEC_CTRL   168U
-#define CPU_CONTEXT_OFFSET_IA32_EFER        176U
-#define CPU_CONTEXT_OFFSET_EXTCTX_START     184U
-#define CPU_CONTEXT_OFFSET_CR3              184U
-#define CPU_CONTEXT_OFFSET_IDTR             192U
-#define CPU_CONTEXT_OFFSET_LDTR             216U
-
-/*sizes of various registers within the VCPU data structure */
-#define VMX_CPU_S_FXSAVE_GUEST_AREA_SIZE    GUEST_STATE_AREA_SIZE

 #ifndef ASSEMBLER

@@ -175,83 +141,6 @@ enum vm_cpu_mode {
 	CPU_MODE_64BIT,		/* IA-32E mode (CS.L = 1) */
 };

-struct segment_sel {
-	uint16_t selector;
-	uint64_t base;
-	uint32_t limit;
-	uint32_t attr;
-};
-
-/**
- * @brief registers info saved for vcpu running context
- */
-struct run_context {
-/* Contains the guest register set.
- * NOTE: This must be the first element in the structure, so that the offsets
- * in vmx_asm.S match
- */
-	union guest_cpu_regs_t {
-		struct acrn_gp_regs regs;
-		uint64_t longs[NUM_GPRS];
-	} guest_cpu_regs;
-
-	/** The guests CR registers 0, 2, 3 and 4. */
-	uint64_t cr0;
-
-	/* CPU_CONTEXT_OFFSET_CR2 =
-	 * offsetof(struct run_context, cr2) = 136
-	 */
-	uint64_t cr2;
-	uint64_t cr4;
-
-	uint64_t rip;
-	uint64_t rflags;
-
-	/* CPU_CONTEXT_OFFSET_IA32_SPEC_CTRL =
-	 * offsetof(struct run_context, ia32_spec_ctrl) = 168
-	 */
-	uint64_t ia32_spec_ctrl;
-	uint64_t ia32_efer;
-};
-
-/*
- * extended context does not save/restore during vm exit/entry, it's mainly
- * used in trusty world switch
- */
-struct ext_context {
-	uint64_t cr3;
-
-	/* segment registers */
-	struct segment_sel idtr;
-	struct segment_sel ldtr;
-	struct segment_sel gdtr;
-	struct segment_sel tr;
-	struct segment_sel cs;
-	struct segment_sel ss;
-	struct segment_sel ds;
-	struct segment_sel es;
-	struct segment_sel fs;
-	struct segment_sel gs;
-
-	uint64_t ia32_star;
-	uint64_t ia32_lstar;
-	uint64_t ia32_fmask;
-	uint64_t ia32_kernel_gs_base;
-
-	uint64_t ia32_pat;
-	uint32_t ia32_sysenter_cs;
-	uint64_t ia32_sysenter_esp;
-	uint64_t ia32_sysenter_eip;
-	uint64_t ia32_debugctl;
-
-	uint64_t dr7;
-	uint64_t tsc_offset;
-
-	/* The 512 bytes area to save the FPU/MMX/SSE states for the guest */
-	uint64_t
-	fxstore_guest_area[VMX_CPU_S_FXSAVE_GUEST_AREA_SIZE / sizeof(uint64_t)]
-	__aligned(16);
-};

 /* 2 worlds: 0 for Normal World, 1 for Secure World */
 #define NR_WORLD	2
@@ -264,7 +153,7 @@ struct ext_context {

 #define EOI_EXIT_BITMAP_SIZE	256U

-struct cpu_context {
+struct guest_cpu_context {
 	struct run_context run_ctx;
 	struct ext_context ext_ctx;

@@ -304,7 +193,7 @@ struct acrn_vcpu_arch {
 	struct acrn_vmtrr vmtrr;

 	int32_t cur_context;
-	struct cpu_context contexts[NR_WORLD];
+	struct guest_cpu_context contexts[NR_WORLD];

 	/* common MSRs, world_msrs[] is a subset of it */
 	uint64_t guest_msrs[NUM_GUEST_MSRS];
@@ -135,7 +135,7 @@ struct acrn_vm {
	 * so the snapshot only stores the vcpu0's run_context
	 * of secure world.
	 */
-	struct cpu_context sworld_snapshot;
+	struct guest_cpu_context sworld_snapshot;

 	uint32_t vcpuid_entry_nr, vcpuid_level, vcpuid_xlevel;
 	struct vcpuid_entry vcpuid_entries[MAX_VM_VCPUID_ENTRIES];