hv: cpu: align general-purpose register layout with vmx

In the current code, the general-purpose register layout in run_context is
not aligned with the general-purpose register index used at VM exit, so the
hypervisor has to map the VM-exit index to the corresponding index in
run_context.
This patch aligns the layout so that no mapping is needed.

Signed-off-by: Binbin Wu <binbin.wu@intel.com>
Reviewed-by: Eddie Dong <eddie.dong@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
Binbin Wu 2018-07-24 18:50:37 +08:00 committed by lijinxia
parent 3d6ff0e5f4
commit b2802f3d1d
11 changed files with 232 additions and 259 deletions
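To make the intent of the change concrete, here is a minimal C sketch (not part of the patch; the helper gp_reg_ptr() is hypothetical) of the layout the diffs below introduce. The field order of struct cpu_gp_regs follows the VM-exit register encoding (0=RAX, 1=RCX, 2=RDX, 3=RBX, 4=RSP, 5=RBP, 6=RSI, 7=RDI, 8..15=R8..R15), so an index taken from an exit qualification addresses the saved context directly, with no translation table.

/* Sketch only: mirrors struct cpu_gp_regs from the header diff in this
 * commit. Field order follows the VM-exit register encoding, so index N
 * selects the N-th 64-bit slot.
 */
#include <stdint.h>
#include <stddef.h>

struct cpu_gp_regs {
	uint64_t rax, rcx, rdx, rbx;	/* indices 0..3   */
	uint64_t rsp, rbp, rsi, rdi;	/* indices 4..7   */
	uint64_t r8,  r9,  r10, r11;	/* indices 8..11  */
	uint64_t r12, r13, r14, r15;	/* indices 12..15 */
};

/* Hypothetical helper: with the aligned layout, the hardware index maps
 * straight onto the register array; no reg_trans_tab is needed.
 */
static inline uint64_t *gp_reg_ptr(struct cpu_gp_regs *regs, unsigned int idx)
{
	return (uint64_t *)regs + idx;	/* idx in 0..15 */
}

/* The run-time ASSERTs in bsp_boot_init() below check these same offsets. */
_Static_assert(offsetof(struct cpu_gp_regs, rcx) == 8U, "rcx offset");
_Static_assert(offsetof(struct cpu_gp_regs, rsp) == 32U, "rsp offset");
_Static_assert(offsetof(struct cpu_gp_regs, r15) == 120U, "r15 offset");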


@@ -379,51 +379,51 @@ void bsp_boot_init(void)
"trusty_startup_param + key_info > 1Page size(4KB)!");
ASSERT(NR_WORLD == 2, "Only 2 Worlds supported!");
ASSERT(offsetof(struct cpu_regs, rax) ==
ASSERT(offsetof(struct cpu_gp_regs, rax) ==
CPU_CONTEXT_OFFSET_RAX,
"cpu_regs rax offset not match");
ASSERT(offsetof(struct cpu_regs, rbx) ==
"cpu_gp_regs rax offset not match");
ASSERT(offsetof(struct cpu_gp_regs, rbx) ==
CPU_CONTEXT_OFFSET_RBX,
"cpu_regs rbx offset not match");
ASSERT(offsetof(struct cpu_regs, rcx) ==
"cpu_gp_regs rbx offset not match");
ASSERT(offsetof(struct cpu_gp_regs, rcx) ==
CPU_CONTEXT_OFFSET_RCX,
"cpu_regs rcx offset not match");
ASSERT(offsetof(struct cpu_regs, rdx) ==
"cpu_gp_regs rcx offset not match");
ASSERT(offsetof(struct cpu_gp_regs, rdx) ==
CPU_CONTEXT_OFFSET_RDX,
"cpu_regs rdx offset not match");
ASSERT(offsetof(struct cpu_regs, rbp) ==
"cpu_gp_regs rdx offset not match");
ASSERT(offsetof(struct cpu_gp_regs, rbp) ==
CPU_CONTEXT_OFFSET_RBP,
"cpu_regs rbp offset not match");
ASSERT(offsetof(struct cpu_regs, rsi) ==
"cpu_gp_regs rbp offset not match");
ASSERT(offsetof(struct cpu_gp_regs, rsi) ==
CPU_CONTEXT_OFFSET_RSI,
"cpu_regs rsi offset not match");
ASSERT(offsetof(struct cpu_regs, rdi) ==
"cpu_gp_regs rsi offset not match");
ASSERT(offsetof(struct cpu_gp_regs, rdi) ==
CPU_CONTEXT_OFFSET_RDI,
"cpu_regs rdi offset not match");
ASSERT(offsetof(struct cpu_regs, r8) ==
"cpu_gp_regs rdi offset not match");
ASSERT(offsetof(struct cpu_gp_regs, r8) ==
CPU_CONTEXT_OFFSET_R8,
"cpu_regs r8 offset not match");
ASSERT(offsetof(struct cpu_regs, r9) ==
"cpu_gp_regs r8 offset not match");
ASSERT(offsetof(struct cpu_gp_regs, r9) ==
CPU_CONTEXT_OFFSET_R9,
"cpu_regs r9 offset not match");
ASSERT(offsetof(struct cpu_regs, r10) ==
"cpu_gp_regs r9 offset not match");
ASSERT(offsetof(struct cpu_gp_regs, r10) ==
CPU_CONTEXT_OFFSET_R10,
"cpu_regs r10 offset not match");
ASSERT(offsetof(struct cpu_regs, r11) ==
"cpu_gp_regs r10 offset not match");
ASSERT(offsetof(struct cpu_gp_regs, r11) ==
CPU_CONTEXT_OFFSET_R11,
"cpu_regs r11 offset not match");
ASSERT(offsetof(struct cpu_regs, r12) ==
"cpu_gp_regs r11 offset not match");
ASSERT(offsetof(struct cpu_gp_regs, r12) ==
CPU_CONTEXT_OFFSET_R12,
"cpu_regs r12 offset not match");
ASSERT(offsetof(struct cpu_regs, r13) ==
"cpu_gp_regs r12 offset not match");
ASSERT(offsetof(struct cpu_gp_regs, r13) ==
CPU_CONTEXT_OFFSET_R13,
"cpu_regs r13 offset not match");
ASSERT(offsetof(struct cpu_regs, r14) ==
"cpu_gp_regs r13 offset not match");
ASSERT(offsetof(struct cpu_gp_regs, r14) ==
CPU_CONTEXT_OFFSET_R14,
"cpu_regs r14 offset not match");
ASSERT(offsetof(struct cpu_regs, r15) ==
"cpu_gp_regs r14 offset not match");
ASSERT(offsetof(struct cpu_gp_regs, r15) ==
CPU_CONTEXT_OFFSET_R15,
"cpu_regs r15 offset not match");
"cpu_gp_regs r15 offset not match");
ASSERT(offsetof(struct run_context, cr2) ==
CPU_CONTEXT_OFFSET_CR2,
"run_context cr2 offset not match");


@@ -38,17 +38,22 @@
* These register names are used in condition statements.
* Within the following groups, register names need to be
* kept in order:
* General register names group (CPU_REG_RAX~CPU_REG_RDI);
* General register names group (CPU_REG_RAX~CPU_REG_R15);
* Non general register names group (CPU_REG_CR0~CPU_REG_GDTR);
* Segment register names group (CPU_REG_ES~CPU_REG_GS).
*/
enum cpu_reg_name {
/* General purpose register layout should align with
* struct cpu_gp_regs
*/
CPU_REG_RAX,
CPU_REG_RBX,
CPU_REG_RCX,
CPU_REG_RDX,
CPU_REG_RBX,
CPU_REG_RSP,
CPU_REG_RBP,
CPU_REG_RSI,
CPU_REG_RDI,
CPU_REG_R8,
CPU_REG_R9,
CPU_REG_R10,
@@ -57,13 +62,12 @@ enum cpu_reg_name {
CPU_REG_R13,
CPU_REG_R14,
CPU_REG_R15,
CPU_REG_RDI,
CPU_REG_CR0,
CPU_REG_CR2,
CPU_REG_CR3,
CPU_REG_CR4,
CPU_REG_DR7,
CPU_REG_RSP,
CPU_REG_RIP,
CPU_REG_RFLAGS,
/*CPU_REG_NATURAL_LAST*/
@@ -127,7 +131,7 @@ enum cpu_reg_name {
#define CPU_REG_FIRST CPU_REG_RAX
#define CPU_REG_LAST CPU_REG_GDTR
#define CPU_REG_GENERAL_FIRST CPU_REG_RAX
#define CPU_REG_GENERAL_LAST CPU_REG_RDI
#define CPU_REG_GENERAL_LAST CPU_REG_R15
#define CPU_REG_NONGENERAL_FIRST CPU_REG_CR0
#define CPU_REG_NONGENERAL_LAST CPU_REG_GDTR
#define CPU_REG_NATURAL_FIRST CPU_REG_CR0

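As a usage illustration (hypothetical helper, not part of this patch): with CPU_REG_GENERAL_LAST now CPU_REG_R15, the general-purpose range of enum cpu_reg_name above matches both the struct cpu_gp_regs layout and the hardware encoding, so an enum value can index guest_cpu_regs.longs directly.

/* Hypothetical helper, assuming the run_context/guest_cpu_regs definitions
 * shown later in this commit: read a guest GP register by cpu_reg_name.
 */
static uint64_t get_guest_gp_reg(const struct run_context *ctx,
				 enum cpu_reg_name reg)
{
	ASSERT((reg >= CPU_REG_GENERAL_FIRST) && (reg <= CPU_REG_GENERAL_LAST),
	       "not a general-purpose register");
	return ctx->guest_cpu_regs.longs[reg];
}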

@@ -211,7 +211,7 @@ int start_vcpu(struct vcpu *vcpu)
cur_context->rip = exec_vmread(VMX_GUEST_RIP);
vcpu->arch_vcpu.inst_len = exec_vmread32(VMX_EXIT_INSTR_LEN);
cur_context->rsp = exec_vmread(VMX_GUEST_RSP);
cur_context->guest_cpu_regs.regs.rsp = exec_vmread(VMX_GUEST_RSP);
cur_context->rflags = exec_vmread(VMX_GUEST_RFLAGS);
/* Obtain VM exit reason */


@@ -97,7 +97,7 @@ int io_instr_vmexit_handler(struct vcpu *vcpu)
status = -EIO;
break;
} else {
struct cpu_regs *regs =
struct cpu_gp_regs *regs =
&cur_context->guest_cpu_regs.regs;
if (direction == 0) {


@@ -274,7 +274,7 @@ static void load_world_ctx(struct run_context *context)
exec_vmwrite(VMX_GUEST_CR0, context->vmx_cr0);
exec_vmwrite(VMX_GUEST_CR4, context->vmx_cr4);
exec_vmwrite(VMX_GUEST_RIP, context->rip);
exec_vmwrite(VMX_GUEST_RSP, context->rsp);
exec_vmwrite(VMX_GUEST_RSP, context->guest_cpu_regs.regs.rsp);
exec_vmwrite(VMX_GUEST_RFLAGS, context->rflags);
exec_vmwrite(VMX_GUEST_DR7, context->dr7);
exec_vmwrite64(VMX_GUEST_IA32_DEBUGCTL_FULL, context->ia32_debugctl);
@@ -408,7 +408,7 @@ static bool init_secure_world_env(struct vcpu *vcpu,
{
vcpu->arch_vcpu.inst_len = 0U;
vcpu->arch_vcpu.contexts[SECURE_WORLD].rip = entry_gpa;
vcpu->arch_vcpu.contexts[SECURE_WORLD].rsp =
vcpu->arch_vcpu.contexts[SECURE_WORLD].guest_cpu_regs.regs.rsp =
TRUSTY_EPT_REBASE_GPA + size;
vcpu->arch_vcpu.contexts[SECURE_WORLD].tsc_offset = 0UL;


@@ -263,28 +263,10 @@ int cr_access_vmexit_handler(struct vcpu *vcpu)
uint64_t *regptr;
struct run_context *cur_context =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
static const int reg_trans_tab[16] = {
[0] = CPU_CONTEXT_INDEX_RAX,
[1] = CPU_CONTEXT_INDEX_RCX,
[2] = CPU_CONTEXT_INDEX_RDX,
[3] = CPU_CONTEXT_INDEX_RBX,
[4] = 0xFF, /* for sp reg, should not be used, just for init */
[5] = CPU_CONTEXT_INDEX_RBP,
[6] = CPU_CONTEXT_INDEX_RSI,
[7] = CPU_CONTEXT_INDEX_RDI,
[8] = CPU_CONTEXT_INDEX_R8,
[9] = CPU_CONTEXT_INDEX_R9,
[10] = CPU_CONTEXT_INDEX_R10,
[11] = CPU_CONTEXT_INDEX_R11,
[12] = CPU_CONTEXT_INDEX_R12,
[13] = CPU_CONTEXT_INDEX_R13,
[14] = CPU_CONTEXT_INDEX_R14,
[15] = CPU_CONTEXT_INDEX_R15,
};
int idx = VM_EXIT_CR_ACCESS_REG_IDX(vcpu->arch_vcpu.exit_qualification);
ASSERT(idx != 4, "index should not be 4 (target SP)");
regptr = cur_context->guest_cpu_regs.longs + reg_trans_tab[idx];
ASSERT((idx >= 0) && (idx <= 15), "index out of range");
regptr = cur_context->guest_cpu_regs.longs + idx;
switch ((VM_EXIT_CR_ACCESS_ACCESS_TYPE
(vcpu->arch_vcpu.exit_qualification) << 4) |

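A condensed sketch of what the cr_access_vmexit_handler() hunk above achieves, assuming VM_EXIT_CR_ACCESS_REG_IDX extracts the register field (bits 11:8) of the exit qualification; this is an illustration, not the patch's exact code.

/* Before the patch the hardware index went through reg_trans_tab[]; now
 * the hardware encoding and the context layout agree, so it is used as-is.
 */
static uint64_t *cr_access_reg_ptr(struct run_context *cur_context,
				   uint64_t exit_qualification)
{
	int idx = VM_EXIT_CR_ACCESS_REG_IDX(exit_qualification);

	ASSERT((idx >= 0) && (idx <= 15), "index out of range");
	return cur_context->guest_cpu_regs.longs + idx;
}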

@@ -81,43 +81,43 @@ next:
/* Compare the launch flag to see if launching (1) or resuming (0) */
cmp $VM_LAUNCH, %rsi
/*128U=0x80=CPU_CONTEXT_OFFSET_CR2*/
mov 0x80(%rdi),%rax
/*136U=0x88=CPU_CONTEXT_OFFSET_CR2*/
mov 0x88(%rdi),%rax
mov %rax,%cr2
/*
* 0U=0x0=CPU_CONTEXT_OFFSET_RAX
* 8U=0x8=CPU_CONTEXT_OFFSET_RBX
* 16U=0x10=CPU_CONTEXT_OFFSET_RCX
* 24U=0x18=CPU_CONTEXT_OFFSET_RDX
* 32U=0x20=CPU_CONTEXT_OFFSET_RBP
* 40U=0x28=CPU_CONTEXT_OFFSET_RSI
* 48U=0x30=CPU_CONTEXT_OFFSET_R8
* 56U=0x38=CPU_CONTEXT_OFFSET_R9
* 64U=0x40=CPU_CONTEXT_OFFSET_R10
* 72U=0x48=CPU_CONTEXT_OFFSET_R11
* 80U=0x50=CPU_CONTEXT_OFFSET_R12
* 88U=0x58=CPU_CONTEXT_OFFSET_R13
* 96U=0x60=CPU_CONTEXT_OFFSET_R14
* 104U=0x68=CPU_CONTEXT_OFFSET_R15
* 8U=0x8=CPU_CONTEXT_OFFSET_RCX
* 16U=0x10=CPU_CONTEXT_OFFSET_RDX
* 24U=0x18=CPU_CONTEXT_OFFSET_RBX
* 40U=0x28=CPU_CONTEXT_OFFSET_RBP
* 48U=0x30=CPU_CONTEXT_OFFSET_RSI
* 64U=0x40=CPU_CONTEXT_OFFSET_R8
* 72U=0x48=CPU_CONTEXT_OFFSET_R9
* 80U=0x50=CPU_CONTEXT_OFFSET_R10
* 88U=0x58=CPU_CONTEXT_OFFSET_R11
* 96U=0x60=CPU_CONTEXT_OFFSET_R12
* 104U=0x68=CPU_CONTEXT_OFFSET_R13
* 112U=0x70=CPU_CONTEXT_OFFSET_R14
* 120U=0x78=CPU_CONTEXT_OFFSET_R15
*/
mov 0x0(%rdi),%rax
mov 0x8(%rdi),%rbx
mov 0x10(%rdi),%rcx
mov 0x18(%rdi),%rdx
mov 0x20(%rdi),%rbp
mov 0x28(%rdi),%rsi
mov 0x30(%rdi),%r8
mov 0x38(%rdi),%r9
mov 0x40(%rdi),%r10
mov 0x48(%rdi),%r11
mov 0x50(%rdi),%r12
mov 0x58(%rdi),%r13
mov 0x60(%rdi),%r14
mov 0x68(%rdi),%r15
mov 0x8(%rdi),%rcx
mov 0x10(%rdi),%rdx
mov 0x18(%rdi),%rbx
mov 0x28(%rdi),%rbp
mov 0x30(%rdi),%rsi
mov 0x40(%rdi),%r8
mov 0x48(%rdi),%r9
mov 0x50(%rdi),%r10
mov 0x58(%rdi),%r11
mov 0x60(%rdi),%r12
mov 0x68(%rdi),%r13
mov 0x70(%rdi),%r14
mov 0x78(%rdi),%r15
/*112U=0x70=CPU_CONTEXT_OFFSET_RDI*/
mov 0x70(%rdi),%rdi
/*56U=0x38=CPU_CONTEXT_OFFSET_RDI*/
mov 0x38(%rdi),%rdi
/* Execute appropriate VMX instruction */
je vm_launch
@@ -146,45 +146,45 @@ vm_exit:
mov %rax,0x0(%rdi)
mov %cr2,%rax
/*128U=0x80=CPU_CONTEXT_OFFSET_CR2*/
mov %rax,0x80(%rdi)
/*136U=0x88=CPU_CONTEXT_OFFSET_CR2*/
mov %rax,0x88(%rdi)
/*
* 8U=0x8=CPU_CONTEXT_OFFSET_RBX
* 16U=0x10=CPU_CONTEXT_OFFSET_RCX
* 24U=0x18=CPU_CONTEXT_OFFSET_RDX
* 32U=0x20=CPU_CONTEXT_OFFSET_RBP
* 40U=0x28=CPU_CONTEXT_OFFSET_RSI
* 48U=0x30=CPU_CONTEXT_OFFSET_R8
* 56U=0x38=CPU_CONTEXT_OFFSET_R9
* 64U=0x40=CPU_CONTEXT_OFFSET_R10
* 72U=0x48=CPU_CONTEXT_OFFSET_R11
* 80U=0x50=CPU_CONTEXT_OFFSET_R12
* 88U=0x58=CPU_CONTEXT_OFFSET_R13
* 96U=0x60=CPU_CONTEXT_OFFSET_R14
* 104U=0x68=CPU_CONTEXT_OFFSET_R15
* 8U=0x8=CPU_CONTEXT_OFFSET_RCX
* 16U=0x10=CPU_CONTEXT_OFFSET_RDX
* 24U=0x18=CPU_CONTEXT_OFFSET_RBX
* 40U=0x28=CPU_CONTEXT_OFFSET_RBP
* 48U=0x30=CPU_CONTEXT_OFFSET_RSI
* 64U=0x40=CPU_CONTEXT_OFFSET_R8
* 72U=0x48=CPU_CONTEXT_OFFSET_R9
* 80U=0x50=CPU_CONTEXT_OFFSET_R10
* 88U=0x58=CPU_CONTEXT_OFFSET_R11
* 96U=0x60=CPU_CONTEXT_OFFSET_R12
* 104U=0x68=CPU_CONTEXT_OFFSET_R13
* 112U=0x70=CPU_CONTEXT_OFFSET_R14
* 120U=0x78=CPU_CONTEXT_OFFSET_R15
*/
mov %rbx,0x8(%rdi)
mov %rcx,0x10(%rdi)
mov %rdx,0x18(%rdi)
mov %rbp,0x20(%rdi)
mov %rsi,0x28(%rdi)
mov %r8,0x30(%rdi)
mov %r9,0x38(%rdi)
mov %r10,0x40(%rdi)
mov %r11,0x48(%rdi)
mov %r12,0x50(%rdi)
mov %r13,0x58(%rdi)
mov %r14,0x60(%rdi)
mov %r15,0x68(%rdi)
mov %rcx,0x8(%rdi)
mov %rdx,0x10(%rdi)
mov %rbx,0x18(%rdi)
mov %rbp,0x28(%rdi)
mov %rsi,0x30(%rdi)
mov %r8,0x40(%rdi)
mov %r9,0x48(%rdi)
mov %r10,0x50(%rdi)
mov %r11,0x58(%rdi)
mov %r12,0x60(%rdi)
mov %r13,0x68(%rdi)
mov %r14,0x70(%rdi)
mov %r15,0x78(%rdi)
/* Load guest RDI off host stack and into RDX */
mov 0(%rsp),%rdx
/* Save guest RDI to guest state area
*112U=0x70=CPU_CONTEXT_OFFSET_RDI
*/
mov %rdx,0x70(%rdi)
/*56U=0x38=CPU_CONTEXT_OFFSET_RDI*/
mov %rdx,0x38(%rdi)
/* Save RDI to RSI for later SPEC_CTRL save*/
mov %rdi,%rsi


@@ -34,38 +34,38 @@
__enter_s3:
/*
* 0U=0x0=CPU_CONTEXT_OFFSET_RAX
* 8U=0x8=CPU_CONTEXT_OFFSET_RBX
* 16U=0x10=CPU_CONTEXT_OFFSET_RCX
* 24U=0x18=CPU_CONTEXT_OFFSET_RDX
* 112U=0x70=CPU_CONTEXT_OFFSET_RDI
* 40U=0x28=CPU_CONTEXT_OFFSET_RSI
* 32U=0x20=CPU_CONTEXT_OFFSET_RBP
* 160=0xa0=CPU_CONTEXT_OFFSET_RSP
* 48U=0x30=CPU_CONTEXT_OFFSET_R8
* 56U=0x38=CPU_CONTEXT_OFFSET_R9
* 64U=0x40=CPU_CONTEXT_OFFSET_R10
* 72U=0x48=CPU_CONTEXT_OFFSET_R11
* 80U=0x50=CPU_CONTEXT_OFFSET_R12
* 88U=0x58=CPU_CONTEXT_OFFSET_R13
* 96U=0x60=CPU_CONTEXT_OFFSET_R14
* 104U=0x68=CPU_CONTEXT_OFFSET_R15
* 8U=0x8=CPU_CONTEXT_OFFSET_RCX
* 16U=0x10=CPU_CONTEXT_OFFSET_RDX
* 24U=0x18=CPU_CONTEXT_OFFSET_RBX
* 32U=0x20=CPU_CONTEXT_OFFSET_RSP
* 40U=0x28=CPU_CONTEXT_OFFSET_RBP
* 48U=0x30=CPU_CONTEXT_OFFSET_RSI
* 56U=0x38=CPU_CONTEXT_OFFSET_RDI
* 64U=0x40=CPU_CONTEXT_OFFSET_R8
* 72U=0x48=CPU_CONTEXT_OFFSET_R9
* 80U=0x50=CPU_CONTEXT_OFFSET_R10
* 88U=0x58=CPU_CONTEXT_OFFSET_R11
* 96U=0x60=CPU_CONTEXT_OFFSET_R12
* 104U=0x68=CPU_CONTEXT_OFFSET_R13
* 112U=0x70=CPU_CONTEXT_OFFSET_R14
* 120U=0x78=CPU_CONTEXT_OFFSET_R15
*/
movq %rax, 0x0 + cpu_ctx(%rip)
movq %rbx, 0x8 + cpu_ctx(%rip)
movq %rcx, 0x10 + cpu_ctx(%rip)
movq %rdx, 0x18 + cpu_ctx(%rip)
movq %rdi, 0x70 + cpu_ctx(%rip)
movq %rsi, 0x28 + cpu_ctx(%rip)
movq %rbp, 0x20 + cpu_ctx(%rip)
movq %rsp, 0xa0 + cpu_ctx(%rip)
movq %r8, 0x30 + cpu_ctx(%rip)
movq %r9, 0x38 + cpu_ctx(%rip)
movq %r10, 0x40 + cpu_ctx(%rip)
movq %r11, 0x48 + cpu_ctx(%rip)
movq %r12, 0x50 + cpu_ctx(%rip)
movq %r13, 0x58 + cpu_ctx(%rip)
movq %r14, 0x60 + cpu_ctx(%rip)
movq %r15, 0x68 + cpu_ctx(%rip)
movq %rcx, 0x8 + cpu_ctx(%rip)
movq %rdx, 0x10 + cpu_ctx(%rip)
movq %rbx, 0x18 + cpu_ctx(%rip)
movq %rsp, 0x20 + cpu_ctx(%rip)
movq %rbp, 0x28 + cpu_ctx(%rip)
movq %rsi, 0x30 + cpu_ctx(%rip)
movq %rdi, 0x38 + cpu_ctx(%rip)
movq %r8, 0x40 + cpu_ctx(%rip)
movq %r9, 0x48 + cpu_ctx(%rip)
movq %r10, 0x50 + cpu_ctx(%rip)
movq %r11, 0x58 + cpu_ctx(%rip)
movq %r12, 0x60 + cpu_ctx(%rip)
movq %r13, 0x68 + cpu_ctx(%rip)
movq %r14, 0x70 + cpu_ctx(%rip)
movq %r15, 0x78 + cpu_ctx(%rip)
pushfq
/*168U=0xa8=CPU_CONTEXT_OFFSET_RFLAGS*/
@@ -77,25 +77,25 @@ __enter_s3:
sldt 0x218 + cpu_ctx(%rip)
mov %cr0, %rax
/*120U=0x78=CPU_CONTEXT_OFFSET_CR0*/
mov %rax, 0x78 + cpu_ctx(%rip)
/*128U=0x80=CPU_CONTEXT_OFFSET_CR0*/
mov %rax, 0x80 + cpu_ctx(%rip)
mov %cr3, %rax
/*136U=0x88=CPU_CONTEXT_OFFSET_CR3*/
mov %rax, 0x88 + cpu_ctx(%rip)
/*144U=0x90=CPU_CONTEXT_OFFSET_CR3*/
mov %rax, 0x90 + cpu_ctx(%rip)
mov %cr4, %rax
/*144U=0x90=CPU_CONTEXT_OFFSET_CR4*/
mov %rax, 0x90 + cpu_ctx(%rip)
/*152U=0x98=CPU_CONTEXT_OFFSET_CR4*/
mov %rax, 0x98 + cpu_ctx(%rip)
wbinvd
/*24U=0x18=CPU_CONTEXT_OFFSET_RDX*/
movq 0x18 + cpu_ctx(%rip), %rdx /* pm1b_cnt_val */
/*112U=0x70=CPU_CONTEXT_OFFSET_RDI*/
movq 0x70 + cpu_ctx(%rip), %rdi /* *vm */
/*40U=0x28=CPU_CONTEXT_OFFSET_RSI*/
movq 0x28 + cpu_ctx(%rip), %rsi /* pm1a_cnt_val */
/*16U=0x10=CPU_CONTEXT_OFFSET_RDX*/
movq 0x10 + cpu_ctx(%rip), %rdx /* pm1b_cnt_val */
/*56U=0x38=CPU_CONTEXT_OFFSET_RDI*/
movq 0x38 + cpu_ctx(%rip), %rdi /* *vm */
/*48U=0x30=CPU_CONTEXT_OFFSET_RSI*/
movq 0x30 + cpu_ctx(%rip), %rsi /* pm1a_cnt_val */
call do_acpi_s3
@@ -113,16 +113,16 @@ __enter_s3:
*/
.global restore_s3_context
restore_s3_context:
/*144U=0x90=CPU_CONTEXT_OFFSET_CR4*/
mov 0x90 + cpu_ctx(%rip), %rax
/*152U=0x98=CPU_CONTEXT_OFFSET_CR4*/
mov 0x98 + cpu_ctx(%rip), %rax
mov %rax, %cr4
/*136U=0x88=CPU_CONTEXT_OFFSET_CR3*/
mov 0x88 + cpu_ctx(%rip), %rax
/*144U=0x90=CPU_CONTEXT_OFFSET_CR3*/
mov 0x90 + cpu_ctx(%rip), %rax
mov %rax, %cr3
/*120U=0x78=CPU_CONTEXT_OFFSET_CR0*/
mov 0x78 + cpu_ctx(%rip), %rax
/*128U=0x80=CPU_CONTEXT_OFFSET_CR0*/
mov 0x80 + cpu_ctx(%rip), %rax
mov %rax, %cr0
/*504U=0x1f8=CPU_CONTEXT_OFFSET_IDTR*/
@@ -132,10 +132,10 @@ restore_s3_context:
/*
*312U=0x138=CPU_CONTEXT_OFFSET_SS
*160=0xa0=CPU_CONTEXT_OFFSET_RSP
*32=0x20=CPU_CONTEXT_OFFSET_RSP
*/
mov 0x138 + cpu_ctx(%rip), %ss
movq 0xa0 + cpu_ctx(%rip), %rsp
movq 0x20 + cpu_ctx(%rip), %rsp
/*168U=0xa8=CPU_CONTEXT_OFFSET_RFLAGS*/
pushq 0xa8 + cpu_ctx(%rip)
@@ -146,35 +146,35 @@ restore_s3_context:
/*
* 0U=0x0=CPU_CONTEXT_OFFSET_RAX
* 8U=0x8=CPU_CONTEXT_OFFSET_RBX
* 16U=0x10=CPU_CONTEXT_OFFSET_RCX
* 24U=0x18=CPU_CONTEXT_OFFSET_RDX
* 112U=0x70=CPU_CONTEXT_OFFSET_RDI
* 40U=0x28=CPU_CONTEXT_OFFSET_RSI
* 32U=0x20=CPU_CONTEXT_OFFSET_RBP
* 48U=0x30=CPU_CONTEXT_OFFSET_R8
* 56U=0x38=CPU_CONTEXT_OFFSET_R9
* 64U=0x40=CPU_CONTEXT_OFFSET_R10
* 72U=0x48=CPU_CONTEXT_OFFSET_R11
* 80U=0x50=CPU_CONTEXT_OFFSET_R12
* 88U=0x58=CPU_CONTEXT_OFFSET_R13
* 96U=0x60=CPU_CONTEXT_OFFSET_R14
* 104U=0x68=CPU_CONTEXT_OFFSET_R15
* 8U=0x8=CPU_CONTEXT_OFFSET_RCX
* 16U=0x10=CPU_CONTEXT_OFFSET_RDX
* 24U=0x18=CPU_CONTEXT_OFFSET_RBX
* 40U=0x28=CPU_CONTEXT_OFFSET_RBP
* 48U=0x30=CPU_CONTEXT_OFFSET_RSI
* 56U=0x38=CPU_CONTEXT_OFFSET_RDI
* 64U=0x40=CPU_CONTEXT_OFFSET_R8
* 72U=0x48=CPU_CONTEXT_OFFSET_R9
* 80U=0x50=CPU_CONTEXT_OFFSET_R10
* 88U=0x58=CPU_CONTEXT_OFFSET_R11
* 96U=0x60=CPU_CONTEXT_OFFSET_R12
* 104U=0x68=CPU_CONTEXT_OFFSET_R13
* 112U=0x70=CPU_CONTEXT_OFFSET_R14
* 120U=0x78=CPU_CONTEXT_OFFSET_R15
*/
movq 0x0 + cpu_ctx(%rip), %rax
movq 0x8 + cpu_ctx(%rip), %rbx
movq 0x10 + cpu_ctx(%rip), %rcx
movq 0x18 + cpu_ctx(%rip), %rdx
movq 0x70 + cpu_ctx(%rip), %rdi
movq 0x28 + cpu_ctx(%rip), %rsi
movq 0x20 + cpu_ctx(%rip), %rbp
movq 0x30 + cpu_ctx(%rip), %r8
movq 0x38 + cpu_ctx(%rip), %r9
movq 0x40 + cpu_ctx(%rip), %r10
movq 0x48 + cpu_ctx(%rip), %r11
movq 0x50 + cpu_ctx(%rip), %r12
movq 0x58 + cpu_ctx(%rip), %r13
movq 0x60 + cpu_ctx(%rip), %r14
movq 0x68 + cpu_ctx(%rip), %r15
movq 0x8 + cpu_ctx(%rip), %rcx
movq 0x10 + cpu_ctx(%rip), %rdx
movq 0x18 + cpu_ctx(%rip), %rbx
movq 0x28 + cpu_ctx(%rip), %rbp
movq 0x30 + cpu_ctx(%rip), %rsi
movq 0x38 + cpu_ctx(%rip), %rdi
movq 0x40 + cpu_ctx(%rip), %r8
movq 0x48 + cpu_ctx(%rip), %r9
movq 0x50 + cpu_ctx(%rip), %r10
movq 0x58 + cpu_ctx(%rip), %r11
movq 0x60 + cpu_ctx(%rip), %r12
movq 0x68 + cpu_ctx(%rip), %r13
movq 0x70 + cpu_ctx(%rip), %r14
movq 0x78 + cpu_ctx(%rip), %r15
retq


@@ -62,7 +62,7 @@ static void dump_guest_reg(struct vcpu *vcpu)
printf("= RIP=0x%016llx RSP=0x%016llx "
"RFLAGS=0x%016llx\r\n",
cur_context->rip,
cur_context->rsp,
cur_context->guest_cpu_regs.regs.rsp,
cur_context->rflags);
printf("= CR0=0x%016llx CR2=0x%016llx "
" CR3=0x%016llx\r\n",
@@ -105,19 +105,19 @@ static void dump_guest_stack(struct vcpu *vcpu)
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
uint32_t err_code = 0;
if (copy_from_gva(vcpu, tmp, cur_context->rsp, DUMP_STACK_SIZE,
&err_code) < 0) {
if (copy_from_gva(vcpu, tmp, cur_context->guest_cpu_regs.regs.rsp,
DUMP_STACK_SIZE, &err_code) < 0) {
printf("\r\nUnabled to Copy Guest Stack:\r\n");
return;
}
printf("\r\nGuest Stack:\r\n");
printf("Dump stack for vcpu %hu, from gva 0x%016llx\r\n",
vcpu->vcpu_id, cur_context->rsp);
vcpu->vcpu_id, cur_context->guest_cpu_regs.regs.rsp);
for (i = 0U; i < (DUMP_STACK_SIZE/32U); i++) {
printf("guest_rsp(0x%llx): 0x%016llx 0x%016llx "
"0x%016llx 0x%016llx\r\n",
(cur_context->rsp+(i*32)),
(cur_context->guest_cpu_regs.regs.rsp+(i*32)),
tmp[i*4], tmp[(i*4)+1],
tmp[(i*4)+2], tmp[(i*4)+3]);
}


@@ -714,7 +714,7 @@ int shell_vcpu_dumpreg(struct shell *p_shell,
shell_puts(p_shell, temp_str);
snprintf(temp_str, MAX_STR_SIZE, "= RIP=0x%016llx RSP=0x%016llx "
"RFLAGS=0x%016llx\r\n", cur_context->rip,
cur_context->rsp, cur_context->rflags);
cur_context->guest_cpu_regs.regs.rsp, cur_context->rflags);
shell_puts(p_shell, temp_str);
snprintf(temp_str, MAX_STR_SIZE, "= CR0=0x%016llx CR2=0x%016llx "
" CR3=0x%016llx\r\n", cur_context->cr0,
@@ -752,7 +752,7 @@ int shell_vcpu_dumpreg(struct shell *p_shell,
shell_puts(p_shell, temp_str);
/* dump sp */
status = copy_from_gva(vcpu, tmp, cur_context->rsp,
status = copy_from_gva(vcpu, tmp, cur_context->guest_cpu_regs.regs.rsp,
DUMPREG_SP_SIZE*sizeof(uint64_t), &err_code);
if (status < 0) {
/* copy_from_gva fail */
@@ -761,7 +761,7 @@ int shell_vcpu_dumpreg(struct shell *p_shell,
snprintf(temp_str, MAX_STR_SIZE,
"\r\nDump RSP for vm %hu, from "
"gva 0x%016llx\r\n",
vm_id, cur_context->rsp);
vm_id, cur_context->guest_cpu_regs.regs.rsp);
shell_puts(p_shell, temp_str);
for (i = 0UL; i < 8UL; i++) {


@@ -13,46 +13,30 @@
#define REG_SIZE 8
/* Number of GPRs saved / restored for guest in VCPU structure */
#define NUM_GPRS 15U
#define NUM_GPRS 16U
#define GUEST_STATE_AREA_SIZE 512
#define CPU_CONTEXT_INDEX_RAX 0
#define CPU_CONTEXT_INDEX_RBX 1
#define CPU_CONTEXT_INDEX_RCX 2
#define CPU_CONTEXT_INDEX_RDX 3
#define CPU_CONTEXT_INDEX_RBP 4
#define CPU_CONTEXT_INDEX_RSI 5
#define CPU_CONTEXT_INDEX_R8 6
#define CPU_CONTEXT_INDEX_R9 7
#define CPU_CONTEXT_INDEX_R10 8
#define CPU_CONTEXT_INDEX_R11 9
#define CPU_CONTEXT_INDEX_R12 10
#define CPU_CONTEXT_INDEX_R13 11
#define CPU_CONTEXT_INDEX_R14 12
#define CPU_CONTEXT_INDEX_R15 13
#define CPU_CONTEXT_INDEX_RDI 14
#define CPU_CONTEXT_OFFSET_RAX 0U
#define CPU_CONTEXT_OFFSET_RBX 8U
#define CPU_CONTEXT_OFFSET_RCX 16U
#define CPU_CONTEXT_OFFSET_RDX 24U
#define CPU_CONTEXT_OFFSET_RBP 32U
#define CPU_CONTEXT_OFFSET_RSI 40U
#define CPU_CONTEXT_OFFSET_R8 48U
#define CPU_CONTEXT_OFFSET_R9 56U
#define CPU_CONTEXT_OFFSET_R10 64U
#define CPU_CONTEXT_OFFSET_R11 72U
#define CPU_CONTEXT_OFFSET_R12 80U
#define CPU_CONTEXT_OFFSET_R13 88U
#define CPU_CONTEXT_OFFSET_R14 96U
#define CPU_CONTEXT_OFFSET_R15 104U
#define CPU_CONTEXT_OFFSET_RDI 112U
#define CPU_CONTEXT_OFFSET_CR0 120U
#define CPU_CONTEXT_OFFSET_CR2 128U
#define CPU_CONTEXT_OFFSET_CR3 136U
#define CPU_CONTEXT_OFFSET_CR4 144U
#define CPU_CONTEXT_OFFSET_RIP 152U
#define CPU_CONTEXT_OFFSET_RSP 160U
#define CPU_CONTEXT_OFFSET_RCX 8U
#define CPU_CONTEXT_OFFSET_RDX 16U
#define CPU_CONTEXT_OFFSET_RBX 24U
#define CPU_CONTEXT_OFFSET_RSP 32U
#define CPU_CONTEXT_OFFSET_RBP 40U
#define CPU_CONTEXT_OFFSET_RSI 48U
#define CPU_CONTEXT_OFFSET_RDI 56U
#define CPU_CONTEXT_OFFSET_R8 64U
#define CPU_CONTEXT_OFFSET_R9 72U
#define CPU_CONTEXT_OFFSET_R10 80U
#define CPU_CONTEXT_OFFSET_R11 88U
#define CPU_CONTEXT_OFFSET_R12 96U
#define CPU_CONTEXT_OFFSET_R13 104U
#define CPU_CONTEXT_OFFSET_R14 112U
#define CPU_CONTEXT_OFFSET_R15 120U
#define CPU_CONTEXT_OFFSET_CR0 128U
#define CPU_CONTEXT_OFFSET_CR2 136U
#define CPU_CONTEXT_OFFSET_CR3 144U
#define CPU_CONTEXT_OFFSET_CR4 152U
#define CPU_CONTEXT_OFFSET_RIP 160U
#define CPU_CONTEXT_OFFSET_RFLAGS 168U
#define CPU_CONTEXT_OFFSET_TSC_OFFSET 184U
#define CPU_CONTEXT_OFFSET_IA32_SPEC_CTRL 192U
@@ -94,13 +78,18 @@ enum vm_cpu_mode {
CPU_MODE_64BIT, /* IA-32E mode (CS.L = 1) */
};
struct cpu_regs {
/* General-purpose register layout aligned with the general-purpose register
 * index used at VM exit (e.g. a VM exit due to CR access); refer to
 * Intel SDM Vol.3C 27-6.
 */
struct cpu_gp_regs {
uint64_t rax;
uint64_t rbx;
uint64_t rcx;
uint64_t rdx;
uint64_t rbx;
uint64_t rsp;
uint64_t rbp;
uint64_t rsi;
uint64_t rdi;
uint64_t r8;
uint64_t r9;
uint64_t r10;
@@ -109,7 +98,6 @@ struct cpu_regs {
uint64_t r13;
uint64_t r14;
uint64_t r15;
uint64_t rdi;
};
struct segment {
@@ -125,7 +113,7 @@ struct run_context {
* in vmx_asm.S match
*/
union {
struct cpu_regs regs;
struct cpu_gp_regs regs;
uint64_t longs[NUM_GPRS];
} guest_cpu_regs;
@@ -140,7 +128,6 @@ struct run_context {
uint64_t cr4;
uint64_t rip;
uint64_t rsp;
uint64_t rflags;
uint64_t dr7;