vcpu: add ext context support for world switch

Move most fields from run_context into ext_context for the world switch.
These fields do not need runtime save/restore during VM exit/entry.
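
Resulting per-world layout (abridged sketch of the new cpu.h definitions; the
full structs are in the header hunk at the end of this patch):

struct cpu_context {
	struct run_context run_ctx;   /* handled at runtime around VM exit/entry          */
	struct ext_context ext_ctx;   /* only switched by save_world_ctx/load_world_ctx   */
};

Accessors now go through the run_ctx member, e.g.
vcpu->arch_vcpu.contexts[i].run_ctx.rip and
vcpu->arch_vcpu.contexts[i].ext_ctx.ia32_pat.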

v3:
- update the cr0/cr4 register switching method

v2:
- use struct name ext_context instead of saved_context
- updated according to the previous v2 patch

Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>

commit 8a95b2abdc (parent 3d5d6c96ec)
Jason Chen CJ, 2018-07-29 16:05:37 +08:00, committed by lijinxia
6 changed files with 205 additions and 189 deletions

@@ -431,6 +431,18 @@ void bsp_boot_init(void)
ASSERT(offsetof(struct run_context, ia32_spec_ctrl) ==
CPU_CONTEXT_OFFSET_IA32_SPEC_CTRL,
"run_context ia32_spec_ctrl offset not match");
ASSERT(offsetof(struct run_context, rflags) ==
CPU_CONTEXT_OFFSET_RFLAGS,
"run_context rflags offset not match");
ASSERT(offsetof(struct ext_context, cr3) ==
CPU_CONTEXT_OFFSET_CR3 - CPU_CONTEXT_OFFSET_EXTCTX_START,
"ext_context cr3 offset not match");
ASSERT(offsetof(struct ext_context, idtr) ==
CPU_CONTEXT_OFFSET_IDTR - CPU_CONTEXT_OFFSET_EXTCTX_START,
"ext_context idtr offset not match");
ASSERT(offsetof(struct ext_context, ldtr) ==
CPU_CONTEXT_OFFSET_LDTR - CPU_CONTEXT_OFFSET_EXTCTX_START,
"ext_context ldtr offset not match");
bitmap_set_nolock(BOOT_CPU_ID, &pcpu_active_bitmap);
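
The subtraction above is what ties the flat offset macros to the new nested
layout; a sketch of the arithmetic these checks verify (macro values taken
from the cpu.h hunk at the end of this patch):

/* CPU_CONTEXT_OFFSET_CR3  (184U) - CPU_CONTEXT_OFFSET_EXTCTX_START (184U) ==  0U == offsetof(struct ext_context, cr3)  */
/* CPU_CONTEXT_OFFSET_IDTR (192U) - CPU_CONTEXT_OFFSET_EXTCTX_START (184U) ==  8U == offsetof(struct ext_context, idtr) */
/* CPU_CONTEXT_OFFSET_LDTR (216U) - CPU_CONTEXT_OFFSET_EXTCTX_START (184U) == 32U == offsetof(struct ext_context, ldtr) */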

@@ -16,94 +16,103 @@ vm_sw_loader_t vm_sw_loader;
inline uint64_t vcpu_get_gpreg(struct vcpu *vcpu, uint32_t reg)
{
struct run_context *cur_context =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
struct run_context *ctx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
return cur_context->guest_cpu_regs.longs[reg];
return ctx->guest_cpu_regs.longs[reg];
}
inline void vcpu_set_gpreg(struct vcpu *vcpu, uint32_t reg, uint64_t val)
{
struct run_context *cur_context =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
struct run_context *ctx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
cur_context->guest_cpu_regs.longs[reg] = val;
ctx->guest_cpu_regs.longs[reg] = val;
}
inline uint64_t vcpu_get_rip(struct vcpu *vcpu)
{
struct run_context *ctx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
if (bitmap_test(CPU_REG_RIP, &vcpu->reg_updated) == 0 &&
bitmap_test_and_set_lock(CPU_REG_RIP, &vcpu->reg_cached) == 0)
vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].rip =
exec_vmread(VMX_GUEST_RIP);
return vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].rip;
ctx->rip = exec_vmread(VMX_GUEST_RIP);
return ctx->rip;
}
inline void vcpu_set_rip(struct vcpu *vcpu, uint64_t val)
{
vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].rip = val;
vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.rip = val;
bitmap_set_lock(CPU_REG_RIP, &vcpu->reg_updated);
}
inline uint64_t vcpu_get_rsp(struct vcpu *vcpu)
{
struct run_context *cur_context =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
struct run_context *ctx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
return cur_context->guest_cpu_regs.regs.rsp;
return ctx->guest_cpu_regs.regs.rsp;
}
inline void vcpu_set_rsp(struct vcpu *vcpu, uint64_t val)
{
struct run_context *cur_context =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
struct run_context *ctx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
cur_context->guest_cpu_regs.regs.rsp = val;
ctx->guest_cpu_regs.regs.rsp = val;
bitmap_set_lock(CPU_REG_RSP, &vcpu->reg_updated);
}
inline uint64_t vcpu_get_efer(struct vcpu *vcpu)
{
struct run_context *ctx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
if (bitmap_test(CPU_REG_EFER, &vcpu->reg_updated) == 0 &&
bitmap_test_and_set_lock(CPU_REG_EFER, &vcpu->reg_cached) == 0)
vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ia32_efer
= exec_vmread64(VMX_GUEST_IA32_EFER_FULL);
return vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ia32_efer;
ctx->ia32_efer = exec_vmread64(VMX_GUEST_IA32_EFER_FULL);
return ctx->ia32_efer;
}
inline void vcpu_set_efer(struct vcpu *vcpu, uint64_t val)
{
vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ia32_efer = val;
vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.ia32_efer
= val;
bitmap_set_lock(CPU_REG_EFER, &vcpu->reg_updated);
}
inline uint64_t vcpu_get_rflags(struct vcpu *vcpu)
{
struct run_context *ctx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
if (bitmap_test(CPU_REG_RFLAGS, &vcpu->reg_updated) == 0 &&
bitmap_test_and_set_lock(CPU_REG_RFLAGS,
&vcpu->reg_cached) == 0 && vcpu->launched)
vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].rflags =
exec_vmread(VMX_GUEST_RFLAGS);
return vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].rflags;
ctx->rflags = exec_vmread(VMX_GUEST_RFLAGS);
return ctx->rflags;
}
inline void vcpu_set_rflags(struct vcpu *vcpu, uint64_t val)
{
vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].rflags = val;
vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.rflags =
val;
bitmap_set_lock(CPU_REG_RFLAGS, &vcpu->reg_updated);
}
inline uint64_t vcpu_get_cr0(struct vcpu *vcpu)
{
uint64_t mask;
struct run_context *ctx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
if (bitmap_test_and_set_lock(CPU_REG_CR0, &vcpu->reg_cached) == 0) {
mask = exec_vmread(VMX_CR0_MASK);
vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].cr0 =
(exec_vmread(VMX_CR0_READ_SHADOW) & mask) |
ctx->cr0 = (exec_vmread(VMX_CR0_READ_SHADOW) & mask) |
(exec_vmread(VMX_GUEST_CR0) & (~mask));
}
return vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].cr0;
return ctx->cr0;
}
inline int vcpu_set_cr0(struct vcpu *vcpu, uint64_t val)
@@ -113,25 +122,27 @@ inline int vcpu_set_cr0(struct vcpu *vcpu, uint64_t val)
inline uint64_t vcpu_get_cr2(struct vcpu *vcpu)
{
return vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].cr2;
return vcpu->
arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.cr2;
}
inline void vcpu_set_cr2(struct vcpu *vcpu, uint64_t val)
{
vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].cr2 = val;
vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.cr2 = val;
}
inline uint64_t vcpu_get_cr4(struct vcpu *vcpu)
{
uint64_t mask;
struct run_context *ctx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
if (bitmap_test_and_set_lock(CPU_REG_CR4, &vcpu->reg_cached) == 0) {
mask = exec_vmread(VMX_CR4_MASK);
vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].cr4 =
(exec_vmread(VMX_CR4_READ_SHADOW) & mask) |
ctx->cr4 = (exec_vmread(VMX_CR4_READ_SHADOW) & mask) |
(exec_vmread(VMX_GUEST_CR4) & (~mask));
}
return vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].cr4;
return ctx->cr4;
}
inline int vcpu_set_cr4(struct vcpu *vcpu, uint64_t val)
@@ -141,12 +152,14 @@ inline int vcpu_set_cr4(struct vcpu *vcpu, uint64_t val)
inline uint64_t vcpu_get_pat_ext(struct vcpu *vcpu)
{
return vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ia32_pat;
return vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].
ext_ctx.ia32_pat;
}
inline void vcpu_set_pat_ext(struct vcpu *vcpu, uint64_t val)
{
vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ia32_pat = val;
vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ext_ctx.ia32_pat
= val;
}
struct vcpu *get_ever_run_vcpu(uint16_t pcpu_id)
@@ -279,22 +292,20 @@ int start_vcpu(struct vcpu *vcpu)
{
uint32_t instlen;
uint64_t rip;
struct run_context *cur_context =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
struct run_context *ctx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
int64_t status = 0;
ASSERT(vcpu != NULL, "Incorrect arguments");
if (bitmap_test_and_clear_lock(CPU_REG_RIP, &vcpu->reg_updated))
exec_vmwrite(VMX_GUEST_RIP, cur_context->rip);
exec_vmwrite(VMX_GUEST_RIP, ctx->rip);
if (bitmap_test_and_clear_lock(CPU_REG_RSP, &vcpu->reg_updated))
exec_vmwrite(VMX_GUEST_RSP,
cur_context->guest_cpu_regs.regs.rsp);
exec_vmwrite(VMX_GUEST_RSP, ctx->guest_cpu_regs.regs.rsp);
if (bitmap_test_and_clear_lock(CPU_REG_EFER, &vcpu->reg_updated))
exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL,
cur_context->ia32_efer);
exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, ctx->ia32_efer);
if (bitmap_test_and_clear_lock(CPU_REG_RFLAGS, &vcpu->reg_updated))
exec_vmwrite(VMX_GUEST_RFLAGS, cur_context->rflags);
exec_vmwrite(VMX_GUEST_RFLAGS, ctx->rflags);
/* If this VCPU is not already launched, launch it */
if (!vcpu->launched) {
@@ -322,7 +333,7 @@ int start_vcpu(struct vcpu *vcpu)
msr_write(MSR_IA32_PRED_CMD, PRED_SET_IBPB);
/* Launch the VM */
status = vmx_vmrun(cur_context, VM_LAUNCH, ibrs_type);
status = vmx_vmrun(ctx, VM_LAUNCH, ibrs_type);
/* See if VM launched successfully */
if (status == 0) {
@@ -341,7 +352,7 @@ int start_vcpu(struct vcpu *vcpu)
0xFFFFFFFFFFFFFFFFUL));
/* Resume the VM */
status = vmx_vmrun(cur_context, VM_RESUME, ibrs_type);
status = vmx_vmrun(ctx, VM_RESUME, ibrs_type);
}
vcpu->reg_cached = 0UL;
@@ -351,7 +362,7 @@ int start_vcpu(struct vcpu *vcpu)
/* Obtain current VCPU instruction length */
vcpu->arch_vcpu.inst_len = exec_vmread32(VMX_EXIT_INSTR_LEN);
cur_context->guest_cpu_regs.regs.rsp = exec_vmread(VMX_GUEST_RSP);
ctx->guest_cpu_regs.regs.rsp = exec_vmread(VMX_GUEST_RSP);
/* Obtain VM exit reason */
vcpu->arch_vcpu.exit_reason = exec_vmread32(VMX_EXIT_REASON);
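
A short usage sketch of the on-demand register cache the accessors above
implement (hypothetical caller; all names are from this patch):

/* after a VM exit the value stays in the VMCS until first read */
uint64_t rip = vcpu_get_rip(vcpu);  /* first read does exec_vmread(VMX_GUEST_RIP) and caches it in run_ctx */
vcpu_set_rip(vcpu, rip + (uint64_t)vcpu->arch_vcpu.inst_len);  /* only updates run_ctx.rip and sets CPU_REG_RIP in reg_updated */
/* start_vcpu() later vmwrites every dirty register before VM entry and
 * clears reg_cached once the vCPU has run, so the next read re-fetches. */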

@@ -206,98 +206,105 @@ void destroy_secure_world(struct vm *vm)
}
#endif
static void save_world_ctx(struct run_context *context)
static void save_world_ctx(struct vcpu *vcpu, struct ext_context *ext_ctx)
{
/* cache on-demand run_context for efer/rflags/rsp/rip */
vcpu_get_efer(vcpu);
vcpu_get_rflags(vcpu);
vcpu_get_rsp(vcpu);
vcpu_get_rip(vcpu);
/* VMCS GUEST field */
/* TSC_OFFSET, CR3, RIP, RSP, RFLAGS already saved on VMEXIT */
context->cr0 = exec_vmread(VMX_CR0_READ_SHADOW);
context->cr4 = exec_vmread(VMX_CR4_READ_SHADOW);
context->vmx_cr0 = exec_vmread(VMX_GUEST_CR0);
context->vmx_cr4 = exec_vmread(VMX_GUEST_CR4);
context->dr7 = exec_vmread(VMX_GUEST_DR7);
context->ia32_debugctl = exec_vmread64(VMX_GUEST_IA32_DEBUGCTL_FULL);
ext_ctx->vmx_cr0 = exec_vmread(VMX_GUEST_CR0);
ext_ctx->vmx_cr4 = exec_vmread(VMX_GUEST_CR4);
ext_ctx->vmx_cr0_read_shadow = exec_vmread(VMX_CR0_READ_SHADOW);
ext_ctx->vmx_cr4_read_shadow = exec_vmread(VMX_CR4_READ_SHADOW);
ext_ctx->cr3 = exec_vmread(VMX_GUEST_CR3);
ext_ctx->dr7 = exec_vmread(VMX_GUEST_DR7);
ext_ctx->ia32_debugctl = exec_vmread64(VMX_GUEST_IA32_DEBUGCTL_FULL);
/*
* Similar to CR0 and CR4, the actual value of guest's IA32_PAT MSR
* (represented by context->vmx_ia32_pat) could be different from the
* value that guest reads (represented by context->ia32_pat).
* (represented by ext_ctx->vmx_ia32_pat) could be different from the
* value that guest reads (represented by ext_ctx->ia32_pat).
*
* the wrmsr handler keeps track of 'ia32_pat', and we only
* need to load 'vmx_ia32_pat' here.
*/
context->vmx_ia32_pat = exec_vmread64(VMX_GUEST_IA32_PAT_FULL);
context->ia32_efer = exec_vmread64(VMX_GUEST_IA32_EFER_FULL);
context->ia32_sysenter_esp = exec_vmread(VMX_GUEST_IA32_SYSENTER_ESP);
context->ia32_sysenter_eip = exec_vmread(VMX_GUEST_IA32_SYSENTER_EIP);
context->ia32_sysenter_cs = exec_vmread32(VMX_GUEST_IA32_SYSENTER_CS);
save_segment(context->cs, VMX_GUEST_CS);
save_segment(context->ss, VMX_GUEST_SS);
save_segment(context->ds, VMX_GUEST_DS);
save_segment(context->es, VMX_GUEST_ES);
save_segment(context->fs, VMX_GUEST_FS);
save_segment(context->gs, VMX_GUEST_GS);
save_segment(context->tr, VMX_GUEST_TR);
save_segment(context->ldtr, VMX_GUEST_LDTR);
ext_ctx->vmx_ia32_pat = exec_vmread64(VMX_GUEST_IA32_PAT_FULL);
ext_ctx->ia32_sysenter_esp = exec_vmread(VMX_GUEST_IA32_SYSENTER_ESP);
ext_ctx->ia32_sysenter_eip = exec_vmread(VMX_GUEST_IA32_SYSENTER_EIP);
ext_ctx->ia32_sysenter_cs = exec_vmread32(VMX_GUEST_IA32_SYSENTER_CS);
save_segment(ext_ctx->cs, VMX_GUEST_CS);
save_segment(ext_ctx->ss, VMX_GUEST_SS);
save_segment(ext_ctx->ds, VMX_GUEST_DS);
save_segment(ext_ctx->es, VMX_GUEST_ES);
save_segment(ext_ctx->fs, VMX_GUEST_FS);
save_segment(ext_ctx->gs, VMX_GUEST_GS);
save_segment(ext_ctx->tr, VMX_GUEST_TR);
save_segment(ext_ctx->ldtr, VMX_GUEST_LDTR);
/* Only base and limit for IDTR and GDTR */
context->idtr.base = exec_vmread(VMX_GUEST_IDTR_BASE);
context->gdtr.base = exec_vmread(VMX_GUEST_GDTR_BASE);
context->idtr.limit = exec_vmread32(VMX_GUEST_IDTR_LIMIT);
context->gdtr.limit = exec_vmread32(VMX_GUEST_GDTR_LIMIT);
ext_ctx->idtr.base = exec_vmread(VMX_GUEST_IDTR_BASE);
ext_ctx->gdtr.base = exec_vmread(VMX_GUEST_GDTR_BASE);
ext_ctx->idtr.limit = exec_vmread32(VMX_GUEST_IDTR_LIMIT);
ext_ctx->gdtr.limit = exec_vmread32(VMX_GUEST_GDTR_LIMIT);
/* MSRs which are not in the VMCS */
context->ia32_star = msr_read(MSR_IA32_STAR);
context->ia32_lstar = msr_read(MSR_IA32_LSTAR);
context->ia32_fmask = msr_read(MSR_IA32_FMASK);
context->ia32_kernel_gs_base = msr_read(MSR_IA32_KERNEL_GS_BASE);
ext_ctx->ia32_star = msr_read(MSR_IA32_STAR);
ext_ctx->ia32_lstar = msr_read(MSR_IA32_LSTAR);
ext_ctx->ia32_fmask = msr_read(MSR_IA32_FMASK);
ext_ctx->ia32_kernel_gs_base = msr_read(MSR_IA32_KERNEL_GS_BASE);
/* FX area */
asm volatile("fxsave (%0)"
: : "r" (context->fxstore_guest_area) : "memory");
: : "r" (ext_ctx->fxstore_guest_area) : "memory");
}
static void load_world_ctx(struct run_context *context)
static void load_world_ctx(struct vcpu *vcpu, struct ext_context *ext_ctx)
{
/* mark to update on-demand run_context for efer/rflags/rsp/rip */
bitmap_set_lock(CPU_REG_EFER, &vcpu->reg_updated);
bitmap_set_lock(CPU_REG_RFLAGS, &vcpu->reg_updated);
bitmap_set_lock(CPU_REG_RSP, &vcpu->reg_updated);
bitmap_set_lock(CPU_REG_RIP, &vcpu->reg_updated);
/* VMCS Execution field */
exec_vmwrite64(VMX_TSC_OFFSET_FULL, context->tsc_offset);
exec_vmwrite64(VMX_TSC_OFFSET_FULL, ext_ctx->tsc_offset);
/* VMCS GUEST field */
exec_vmwrite(VMX_CR0_READ_SHADOW, context->cr0);
exec_vmwrite(VMX_GUEST_CR3, context->cr3);
exec_vmwrite(VMX_CR4_READ_SHADOW, context->cr4);
exec_vmwrite(VMX_GUEST_CR0, context->vmx_cr0);
exec_vmwrite(VMX_GUEST_CR4, context->vmx_cr4);
exec_vmwrite(VMX_GUEST_RIP, context->rip);
exec_vmwrite(VMX_GUEST_RSP, context->guest_cpu_regs.regs.rsp);
exec_vmwrite(VMX_GUEST_RFLAGS, context->rflags);
exec_vmwrite(VMX_GUEST_DR7, context->dr7);
exec_vmwrite64(VMX_GUEST_IA32_DEBUGCTL_FULL, context->ia32_debugctl);
exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, context->vmx_ia32_pat);
exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, context->ia32_efer);
exec_vmwrite32(VMX_GUEST_IA32_SYSENTER_CS, context->ia32_sysenter_cs);
exec_vmwrite(VMX_GUEST_IA32_SYSENTER_ESP, context->ia32_sysenter_esp);
exec_vmwrite(VMX_GUEST_IA32_SYSENTER_EIP, context->ia32_sysenter_eip);
load_segment(context->cs, VMX_GUEST_CS);
load_segment(context->ss, VMX_GUEST_SS);
load_segment(context->ds, VMX_GUEST_DS);
load_segment(context->es, VMX_GUEST_ES);
load_segment(context->fs, VMX_GUEST_FS);
load_segment(context->gs, VMX_GUEST_GS);
load_segment(context->tr, VMX_GUEST_TR);
load_segment(context->ldtr, VMX_GUEST_LDTR);
exec_vmwrite(VMX_GUEST_CR0, ext_ctx->vmx_cr0);
exec_vmwrite(VMX_GUEST_CR4, ext_ctx->vmx_cr4);
exec_vmwrite(VMX_CR0_READ_SHADOW, ext_ctx->vmx_cr0_read_shadow);
exec_vmwrite(VMX_CR4_READ_SHADOW, ext_ctx->vmx_cr4_read_shadow);
exec_vmwrite(VMX_GUEST_CR3, ext_ctx->cr3);
exec_vmwrite(VMX_GUEST_DR7, ext_ctx->dr7);
exec_vmwrite64(VMX_GUEST_IA32_DEBUGCTL_FULL, ext_ctx->ia32_debugctl);
exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, ext_ctx->vmx_ia32_pat);
exec_vmwrite32(VMX_GUEST_IA32_SYSENTER_CS, ext_ctx->ia32_sysenter_cs);
exec_vmwrite(VMX_GUEST_IA32_SYSENTER_ESP, ext_ctx->ia32_sysenter_esp);
exec_vmwrite(VMX_GUEST_IA32_SYSENTER_EIP, ext_ctx->ia32_sysenter_eip);
load_segment(ext_ctx->cs, VMX_GUEST_CS);
load_segment(ext_ctx->ss, VMX_GUEST_SS);
load_segment(ext_ctx->ds, VMX_GUEST_DS);
load_segment(ext_ctx->es, VMX_GUEST_ES);
load_segment(ext_ctx->fs, VMX_GUEST_FS);
load_segment(ext_ctx->gs, VMX_GUEST_GS);
load_segment(ext_ctx->tr, VMX_GUEST_TR);
load_segment(ext_ctx->ldtr, VMX_GUEST_LDTR);
/* Only base and limit for IDTR and GDTR */
exec_vmwrite(VMX_GUEST_IDTR_BASE, context->idtr.base);
exec_vmwrite(VMX_GUEST_GDTR_BASE, context->gdtr.base);
exec_vmwrite32(VMX_GUEST_IDTR_LIMIT, context->idtr.limit);
exec_vmwrite32(VMX_GUEST_GDTR_LIMIT, context->gdtr.limit);
exec_vmwrite(VMX_GUEST_IDTR_BASE, ext_ctx->idtr.base);
exec_vmwrite(VMX_GUEST_GDTR_BASE, ext_ctx->gdtr.base);
exec_vmwrite32(VMX_GUEST_IDTR_LIMIT, ext_ctx->idtr.limit);
exec_vmwrite32(VMX_GUEST_GDTR_LIMIT, ext_ctx->gdtr.limit);
/* MSRs which are not in the VMCS */
msr_write(MSR_IA32_STAR, context->ia32_star);
msr_write(MSR_IA32_LSTAR, context->ia32_lstar);
msr_write(MSR_IA32_FMASK, context->ia32_fmask);
msr_write(MSR_IA32_KERNEL_GS_BASE, context->ia32_kernel_gs_base);
msr_write(MSR_IA32_STAR, ext_ctx->ia32_star);
msr_write(MSR_IA32_LSTAR, ext_ctx->ia32_lstar);
msr_write(MSR_IA32_FMASK, ext_ctx->ia32_fmask);
msr_write(MSR_IA32_KERNEL_GS_BASE, ext_ctx->ia32_kernel_gs_base);
/* FX area */
asm volatile("fxrstor (%0)" : : "r" (context->fxstore_guest_area));
asm volatile("fxrstor (%0)" : : "r" (ext_ctx->fxstore_guest_area));
}
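
Taken together with the vcpu accessors above, the run_context registers cross
the world switch like this (sketch only; the two calls appear verbatim in
switch_world() below):

/* outgoing world: vcpu_get_*() forces efer/rflags/rsp/rip into its run_ctx */
save_world_ctx(vcpu, &arch_vcpu->contexts[!next_world].ext_ctx);
/* incoming world: ext_ctx is written straight to the VMCS and MSRs, while its
 * run_ctx registers are only marked dirty so start_vcpu() vmwrites them */
load_world_ctx(vcpu, &arch_vcpu->contexts[next_world].ext_ctx);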
static void copy_smc_param(struct run_context *prev_ctx,
@@ -314,14 +321,14 @@ void switch_world(struct vcpu *vcpu, int next_world)
struct vcpu_arch *arch_vcpu = &vcpu->arch_vcpu;
/* save previous world context */
save_world_ctx(&arch_vcpu->contexts[!next_world]);
save_world_ctx(vcpu, &arch_vcpu->contexts[!next_world].ext_ctx);
/* load next world context */
load_world_ctx(&arch_vcpu->contexts[next_world]);
load_world_ctx(vcpu, &arch_vcpu->contexts[next_world].ext_ctx);
/* Copy SMC parameters: RDI, RSI, RDX, RBX */
copy_smc_param(&arch_vcpu->contexts[!next_world],
&arch_vcpu->contexts[next_world]);
copy_smc_param(&arch_vcpu->contexts[!next_world].run_ctx,
&arch_vcpu->contexts[next_world].run_ctx);
/* load EPTP for next world */
if (next_world == NORMAL_WORLD) {
@@ -383,7 +390,7 @@ static bool setup_trusty_info(struct vcpu *vcpu,
* address(GPA) of startup_param on boot. Currently, the startup_param
* is put in the first page of trusty memory just followed by key_info.
*/
vcpu->arch_vcpu.contexts[SECURE_WORLD].guest_cpu_regs.regs.rdi
vcpu->arch_vcpu.contexts[SECURE_WORLD].run_ctx.guest_cpu_regs.regs.rdi
= (uint64_t)TRUSTY_EPT_REBASE_GPA + sizeof(struct trusty_key_info);
return true;
@@ -400,29 +407,13 @@ static bool init_secure_world_env(struct vcpu *vcpu,
uint32_t size)
{
vcpu->arch_vcpu.inst_len = 0U;
vcpu->arch_vcpu.contexts[SECURE_WORLD].rip = entry_gpa;
vcpu->arch_vcpu.contexts[SECURE_WORLD].guest_cpu_regs.regs.rsp =
vcpu->arch_vcpu.contexts[SECURE_WORLD].run_ctx.rip = entry_gpa;
vcpu->arch_vcpu.contexts[SECURE_WORLD].run_ctx.guest_cpu_regs.regs.rsp =
TRUSTY_EPT_REBASE_GPA + size;
vcpu->arch_vcpu.contexts[SECURE_WORLD].tsc_offset = 0UL;
vcpu->arch_vcpu.contexts[SECURE_WORLD].cr0 =
vcpu->arch_vcpu.contexts[NORMAL_WORLD].cr0;
vcpu->arch_vcpu.contexts[SECURE_WORLD].cr4 =
vcpu->arch_vcpu.contexts[NORMAL_WORLD].cr4;
vcpu->arch_vcpu.contexts[SECURE_WORLD].vmx_cr0 =
vcpu->arch_vcpu.contexts[NORMAL_WORLD].vmx_cr0;
vcpu->arch_vcpu.contexts[SECURE_WORLD].vmx_cr4 =
vcpu->arch_vcpu.contexts[NORMAL_WORLD].vmx_cr4;
vcpu->arch_vcpu.contexts[SECURE_WORLD].ia32_pat =
vcpu->arch_vcpu.contexts[NORMAL_WORLD].ia32_pat;
vcpu->arch_vcpu.contexts[SECURE_WORLD].vmx_ia32_pat =
vcpu->arch_vcpu.contexts[NORMAL_WORLD].vmx_ia32_pat;
exec_vmwrite(VMX_GUEST_RSP,
TRUSTY_EPT_REBASE_GPA + size);
exec_vmwrite64(VMX_TSC_OFFSET_FULL,
vcpu->arch_vcpu.contexts[SECURE_WORLD].tsc_offset);
vcpu->arch_vcpu.contexts[SECURE_WORLD].ext_ctx.tsc_offset = 0UL;
vcpu->arch_vcpu.contexts[SECURE_WORLD].ext_ctx.ia32_pat =
vcpu->arch_vcpu.contexts[NORMAL_WORLD].ext_ctx.ia32_pat;
return setup_trusty_info(vcpu, size, base_hpa);
}
@@ -470,7 +461,7 @@ bool initialize_trusty(struct vcpu *vcpu, uint64_t param)
HVA2HPA(vm->arch_vm.sworld_eptp) | (3UL<<3) | 6UL);
/* save Normal World context */
save_world_ctx(&vcpu->arch_vcpu.contexts[NORMAL_WORLD]);
save_world_ctx(vcpu, &vcpu->arch_vcpu.contexts[NORMAL_WORLD].ext_ctx);
/* init secure world environment */
if (init_secure_world_env(vcpu,

@@ -317,7 +317,7 @@ uint64_t vmx_rdmsr_pat(struct vcpu *vcpu)
/*
* note: if context->cr0.CD is set, the actual value in guest's
* IA32_PAT MSR is PAT_ALL_UC_VALUE, which may be different from
* the saved value context->ia32_pat
the saved value ext_ctx->ia32_pat
*/
return vcpu_get_pat_ext(vcpu);
}
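
For context, a sketch of the write side this comment refers to (assumed shape
of the PAT wrmsr handler; "value" stands for the guest WRMSR payload and
CR0_CD for the CR0 cache-disable bit mask, neither taken from this patch):

vcpu_set_pat_ext(vcpu, value);                  /* guest-visible copy: ext_ctx.ia32_pat */
if ((vcpu_get_cr0(vcpu) & CR0_CD) != 0UL) {
	/* actual PAT (what save_world_ctx() later captures as vmx_ia32_pat)
	 * is forced to all-UC while CR0.CD is set */
	exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, PAT_ALL_UC_VALUE);
} else {
	exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, value);
}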

@@ -59,8 +59,8 @@ vmx_vmrun:
/* 0x00000048 = MSR_IA32_SPEC_CTRL */
movl $0x00000048,%ecx
/*0xc0=192=CPU_CONTEXT_OFFSET_IA32_SPEC_CTRL*/
mov 0xc0(%rdi),%rax
/*0xa8=168U=CPU_CONTEXT_OFFSET_IA32_SPEC_CTRL*/
mov 0xa8(%rdi),%rax
movl $0,%edx
wrmsr
@@ -231,8 +231,8 @@ vm_eval_error:
*/
movl $0x00000048,%ecx
rdmsr
/*192U=0xc0=CPU_CONTEXT_OFFSET_IA32_SPEC_CTRL*/
mov %rax,0xc0(%rsi)
/*168U=0xa8=CPU_CONTEXT_OFFSET_IA32_SPEC_CTRL*/
mov %rax,0xa8(%rsi)
/* 0x1 = SPEC_ENABLE_IBRS */
movl $0x1,%eax
movl $0,%edx
@@ -255,8 +255,8 @@ ibrs_opt:
*/
movl $0x00000048,%ecx
rdmsr
/*192U=0xc0=CPU_CONTEXT_OFFSET_IA32_SPEC_CTRL*/
mov %rax,0xc0(%rsi)
/*168U=0xa8=CPU_CONTEXT_OFFSET_IA32_SPEC_CTRL*/
mov %rax,0xa8(%rsi)
/* 0x2 = SPEC_ENABLE_STIBP */
movl $0x2,%eax
movl $0,%edx
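
Why the immediate changes here: vmx_vmrun() is now handed a pointer to
run_ctx, and with cr3, dr7 and tsc_offset moved out to ext_context,
ia32_spec_ctrl shifts from offset 192 (0xc0) to 168 (0xa8). The arithmetic,
using the values from the cpu.h hunk below:

/*   16 GPRs * 8 bytes                            = 128
 * + cr0, cr2, cr4, rip, rflags (5 * 8)           =  40
 *   ---------------------------------------------------
 *   offsetof(struct run_context, ia32_spec_ctrl) = 168 = 0xa8
 * (kept in sync by the ASSERT added to bsp_boot_init() above)
 */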

@@ -34,27 +34,15 @@
#define CPU_CONTEXT_OFFSET_R15 120U
#define CPU_CONTEXT_OFFSET_CR0 128U
#define CPU_CONTEXT_OFFSET_CR2 136U
#define CPU_CONTEXT_OFFSET_CR3 144U
#define CPU_CONTEXT_OFFSET_CR4 152U
#define CPU_CONTEXT_OFFSET_RIP 160U
#define CPU_CONTEXT_OFFSET_RFLAGS 168U
#define CPU_CONTEXT_OFFSET_TSC_OFFSET 184U
#define CPU_CONTEXT_OFFSET_IA32_SPEC_CTRL 192U
#define CPU_CONTEXT_OFFSET_IA32_STAR 200U
#define CPU_CONTEXT_OFFSET_IA32_LSTAR 208U
#define CPU_CONTEXT_OFFSET_IA32_FMASK 216U
#define CPU_CONTEXT_OFFSET_IA32_KERNEL_GS_BASE 224U
#define CPU_CONTEXT_OFFSET_CS 280U
#define CPU_CONTEXT_OFFSET_SS 312U
#define CPU_CONTEXT_OFFSET_DS 344U
#define CPU_CONTEXT_OFFSET_ES 376U
#define CPU_CONTEXT_OFFSET_FS 408U
#define CPU_CONTEXT_OFFSET_GS 440U
#define CPU_CONTEXT_OFFSET_TR 472U
#define CPU_CONTEXT_OFFSET_IDTR 504U
#define CPU_CONTEXT_OFFSET_LDTR 536U
#define CPU_CONTEXT_OFFSET_GDTR 568U
#define CPU_CONTEXT_OFFSET_FXSTORE_GUEST_AREA 608U
#define CPU_CONTEXT_OFFSET_CR4 144U
#define CPU_CONTEXT_OFFSET_RIP 152U
#define CPU_CONTEXT_OFFSET_RFLAGS 160U
#define CPU_CONTEXT_OFFSET_IA32_SPEC_CTRL 168U
#define CPU_CONTEXT_OFFSET_IA32_EFER 176U
#define CPU_CONTEXT_OFFSET_EXTCTX_START 184U
#define CPU_CONTEXT_OFFSET_CR3 184U
#define CPU_CONTEXT_OFFSET_IDTR 192U
#define CPU_CONTEXT_OFFSET_LDTR 216U
/*sizes of various registers within the VCPU data structure */
#define VMX_CPU_S_FXSAVE_GUEST_AREA_SIZE GUEST_STATE_AREA_SIZE
@@ -120,24 +108,41 @@ struct run_context {
/** The guest's CR registers 0, 2 and 4. */
uint64_t cr0;
/* VMX_MACHINE_T_GUEST_CR2_OFFSET =
* offsetof(struct run_context, cr2) = 128
/* CPU_CONTEXT_OFFSET_CR2 =
* offsetof(struct run_context, cr2) = 136
*/
uint64_t cr2;
uint64_t cr3;
uint64_t cr4;
uint64_t rip;
uint64_t rflags;
uint64_t dr7;
uint64_t tsc_offset;
/* MSRs */
/* VMX_MACHINE_T_GUEST_SPEC_CTRL_OFFSET =
* offsetof(struct run_context, ia32_spec_ctrl) = 192
/* CPU_CONTEXT_OFFSET_IA32_SPEC_CTRL =
* offsetof(struct run_context, ia32_spec_ctrl) = 168
*/
uint64_t ia32_spec_ctrl;
uint64_t ia32_efer;
};
/*
* extended context is not saved/restored during vm exit/entry, it is mainly
* used in trusty world switch
*/
struct ext_context {
uint64_t cr3;
/* segment registers */
struct segment_sel idtr;
struct segment_sel ldtr;
struct segment_sel gdtr;
struct segment_sel tr;
struct segment_sel cs;
struct segment_sel ss;
struct segment_sel ds;
struct segment_sel es;
struct segment_sel fs;
struct segment_sel gs;
uint64_t ia32_star;
uint64_t ia32_lstar;
uint64_t ia32_fmask;
@@ -145,26 +150,18 @@ struct run_context {
uint64_t ia32_pat;
uint64_t vmx_ia32_pat;
uint64_t ia32_efer;
uint32_t ia32_sysenter_cs;
uint64_t ia32_sysenter_esp;
uint64_t ia32_sysenter_eip;
uint64_t ia32_debugctl;
uint64_t dr7;
uint64_t tsc_offset;
uint64_t vmx_cr0;
uint64_t vmx_cr4;
/* segment registers */
struct segment_sel cs;
struct segment_sel ss;
struct segment_sel ds;
struct segment_sel es;
struct segment_sel fs;
struct segment_sel gs;
struct segment_sel tr;
struct segment_sel idtr;
struct segment_sel ldtr;
struct segment_sel gdtr;
uint64_t vmx_cr0_read_shadow;
uint64_t vmx_cr4_read_shadow;
/* The 512 bytes area to save the FPU/MMX/SSE states for the guest */
uint64_t
@@ -182,9 +179,14 @@ struct event_injection_info {
uint32_t error_code;
};
struct cpu_context {
struct run_context run_ctx;
struct ext_context ext_ctx;
};
struct vcpu_arch {
int cur_context;
struct run_context contexts[NR_WORLD];
struct cpu_context contexts[NR_WORLD];
/* A pointer to the VMCS for this CPU. */
void *vmcs;