vcpu: add get/set register APIs

there are 3 types of vcpu runtime contexts:
- runtime contexts always saved/restored during VM exit/entry, which
  include the general registers rax/rcx/rdx/rbx/rbp/rsi/rdi/r8~r15, cr2
  and the MSR for Spectre control (ia32_spec_ctrl)
- runtime contexts cached/updated on demand during VM exit/entry, which
  include the frequently used registers rsp, rip, efer, rflags, cr0 and
  cr4
- runtime contexts always read/written from/to the VMCS, which include
  the remaining registers not listed above

this patch adds get/set register APIs for the vcpu runtime contexts, and
unifies their save/restore handling according to the description above.
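
To make the second type concrete, the on-demand scheme pairs a lazy read
with a deferred write. Below is a condensed sketch of that pattern, based
on the vcpu_get/set_rip implementation in the diff; ctx() is shorthand
for &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context], used only to
keep these illustration lines short:

    /* read: fetch from the VMCS only on the first access after a VM exit */
    uint64_t vcpu_get_rip(struct vcpu *vcpu)
    {
            if (bitmap_test(CPU_REG_RIP, &vcpu->reg_updated) == 0 &&
                bitmap_test_and_set_lock(CPU_REG_RIP, &vcpu->reg_cached) == 0)
                    ctx(vcpu)->rip = exec_vmread(VMX_GUEST_RIP);
            return ctx(vcpu)->rip;
    }

    /* write: update the cached copy and mark it dirty; start_vcpu()
     * flushes every dirty reg_updated bit back to the VMCS just before
     * VM entry, and reg_cached is cleared to 0 after every VM exit
     */
    void vcpu_set_rip(struct vcpu *vcpu, uint64_t val)
    {
            ctx(vcpu)->rip = val;
            bitmap_set_lock(CPU_REG_RIP, &vcpu->reg_updated);
    }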

v3:
- update vcpu_get/set_cr0/4 as the unified interface to get/set guest
  cr0/cr4; reads use the on-demand cache, while writes always go to the
  VMCS.
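
The cached cr0/cr4 read differs slightly because of the guest/host mask
split: bits owned by the hypervisor come from the read shadow, the rest
from the guest VMCS field. A condensed sketch of what vcpu_get_cr0() in
this patch does on a cache miss:

    uint64_t mask = exec_vmread(VMX_CR0_MASK);
    cr0 = (exec_vmread(VMX_CR0_READ_SHADOW) & mask) |
            (exec_vmread(VMX_GUEST_CR0) & ~mask);

vcpu_set_cr0/4 simply forward to vmx_write_cr0/4, which write the VMCS
directly and then clear the reg_cached bit so the next read refetches
the merged value from the VMCS.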

v2:
- use reg_cached/reg_updated for on-demand runtime contexts
- always read/write cr3 from/to VMCS

Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Jason Chen CJ 2018-07-26 14:56:47 +08:00 committed by lijinxia
parent 5aa1ad3bfc
commit 3d5d6c96ec
20 changed files with 408 additions and 325 deletions

----------------------------------------------------------------

@@ -110,8 +110,6 @@ inline bool vm_lapic_disabled(struct vm *vm)
 enum vm_paging_mode get_vcpu_paging_mode(struct vcpu *vcpu)
 {
-        struct run_context *cur_context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
         enum vm_cpu_mode cpu_mode;

         cpu_mode = get_vcpu_mode(vcpu);
@@ -120,10 +118,10 @@ enum vm_paging_mode get_vcpu_paging_mode(struct vcpu *vcpu)
                 return PAGING_MODE_0_LEVEL;
         }
         else if (cpu_mode == CPU_MODE_PROTECTED) {
-                if ((cur_context->cr4 & CR4_PAE) != 0U) {
+                if ((vcpu_get_cr4(vcpu) & CR4_PAE) != 0U) {
                         return PAGING_MODE_3_LEVEL;
                 }
-                else if ((cur_context->cr0 & CR0_PG) != 0U) {
+                else if ((vcpu_get_cr0(vcpu) & CR0_PG) != 0U) {
                         return PAGING_MODE_2_LEVEL;
                 }
                 return PAGING_MODE_0_LEVEL;
@@ -273,8 +271,6 @@ out:
 int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa,
         uint32_t *err_code)
 {
-        struct run_context *cur_context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
         enum vm_paging_mode pm = get_vcpu_paging_mode(vcpu);
         struct page_walk_info pw_info;
         int ret = 0;
@@ -284,15 +280,14 @@ int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa,
         }
         *gpa = 0UL;

-        pw_info.top_entry = cur_context->cr3;
+        pw_info.top_entry = exec_vmread(VMX_GUEST_CR3);
         pw_info.level = pm;
         pw_info.is_write_access = ((*err_code & PAGE_FAULT_WR_FLAG) != 0U);
         pw_info.is_inst_fetch = ((*err_code & PAGE_FAULT_ID_FLAG) != 0U);
         pw_info.is_user_mode = ((exec_vmread16(VMX_GUEST_CS_SEL) & 0x3U) == 3U);
         pw_info.pse = true;
-        pw_info.nxe =
-                ((cur_context->ia32_efer & MSR_IA32_EFER_NXE_BIT) != 0UL);
-        pw_info.wp = ((cur_context->cr0 & CR0_WP) != 0UL);
+        pw_info.nxe = ((vcpu_get_efer(vcpu) & MSR_IA32_EFER_NXE_BIT) != 0UL);
+        pw_info.wp = ((vcpu_get_cr0(vcpu) & CR0_WP) != 0UL);

         *err_code &= ~PAGE_FAULT_P_FLAG;
@@ -304,7 +299,7 @@ int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa,
                 ret = local_gva2gpa_pae(vcpu, &pw_info, gva, gpa, err_code);
         } else if (pm == PAGING_MODE_2_LEVEL) {
                 pw_info.width = 10U;
-                pw_info.pse = ((cur_context->cr4 & CR4_PSE) != 0UL);
+                pw_info.pse = ((vcpu_get_cr4(vcpu) & CR4_PSE) != 0UL);
                 pw_info.nxe = false;
                 ret = local_gva2gpa_common(vcpu, &pw_info, gva, gpa, err_code);
         } else {

----------------------------------------------------------------

@@ -1643,8 +1643,7 @@ vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
 int
 vie_init(struct instr_emul_vie *vie, struct vcpu *vcpu)
 {
-        uint64_t guest_rip_gva =
-                vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].rip;
+        uint64_t guest_rip_gva = vcpu_get_rip(vcpu);
         uint32_t inst_len = vcpu->arch_vcpu.inst_len;
         uint32_t err_code;
         int ret;

----------------------------------------------------------------

@@ -26,8 +26,6 @@ is_descriptor_table(enum cpu_reg_name reg);
 int vm_get_register(struct vcpu *vcpu, enum cpu_reg_name reg, uint64_t *retval)
 {
-        struct run_context *cur_context;
-
         if (vcpu == NULL) {
                 return -EINVAL;
         }
@@ -37,10 +35,9 @@ int vm_get_register(struct vcpu *vcpu, enum cpu_reg_name reg, uint64_t *retval)
         }

         if ((reg >= CPU_REG_GENERAL_FIRST) && (reg <= CPU_REG_GENERAL_LAST)) {
-                cur_context =
-                        &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
-                *retval = cur_context->guest_cpu_regs.longs[reg];
-        } else if ((reg >= CPU_REG_NONGENERAL_FIRST) && (reg <= CPU_REG_NONGENERAL_LAST)) {
+                *retval = vcpu_get_gpreg(vcpu, reg);
+        } else if ((reg >= CPU_REG_NONGENERAL_FIRST) &&
+                        (reg <= CPU_REG_NONGENERAL_LAST)) {
                 uint32_t field = get_vmcs_field(reg);

                 if (field != VMX_INVALID_VMCS_FIELD) {
@@ -61,8 +58,6 @@ int vm_get_register(struct vcpu *vcpu, enum cpu_reg_name reg, uint64_t *retval)
 int vm_set_register(struct vcpu *vcpu, enum cpu_reg_name reg, uint64_t val)
 {
-        struct run_context *cur_context;
-
         if (vcpu == NULL) {
                 return -EINVAL;
         }
@@ -72,10 +67,9 @@ int vm_set_register(struct vcpu *vcpu, enum cpu_reg_name reg, uint64_t val)
         }

         if ((reg >= CPU_REG_GENERAL_FIRST) && (reg <= CPU_REG_GENERAL_LAST)) {
-                cur_context =
-                        &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
-                cur_context->guest_cpu_regs.longs[reg] = val;
-        } else if ((reg >= CPU_REG_NONGENERAL_FIRST) && (reg <= CPU_REG_NONGENERAL_LAST)) {
+                vcpu_set_gpreg(vcpu, reg, val);
+        } else if ((reg >= CPU_REG_NONGENERAL_FIRST) &&
+                        (reg <= CPU_REG_NONGENERAL_LAST)) {
                 uint32_t field = get_vmcs_field(reg);

                 if (field != VMX_INVALID_VMCS_FIELD) {
@@ -305,8 +299,7 @@ static void get_guest_paging_info(struct vcpu *vcpu, struct instr_emul_ctxt *emu
         ASSERT(emul_ctxt != NULL && vcpu != NULL, "Error in input arguments");
         cpl = (uint8_t)((csar >> 5) & 3U);
-        emul_ctxt->paging.cr3 =
-                vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].cr3;
+        emul_ctxt->paging.cr3 = exec_vmread(VMX_GUEST_CR3);
         emul_ctxt->paging.cpl = cpl;
         emul_ctxt->paging.cpu_mode = get_vcpu_mode(vcpu);
         emul_ctxt->paging.paging_mode = get_vcpu_paging_mode(vcpu);
@@ -348,8 +341,7 @@ int decode_instruction(struct vcpu *vcpu)
         if (retval < 0) {
                 if (retval != -EFAULT) {
                         pr_err("decode instruction failed @ 0x%016llx:",
-                                vcpu->arch_vcpu.
-                                contexts[vcpu->arch_vcpu.cur_context].rip);
+                                vcpu_get_rip(vcpu));
                 }
                 return retval;
         }
@@ -363,7 +355,7 @@ int decode_instruction(struct vcpu *vcpu)
         if (retval != 0) {
                 pr_err("decode instruction failed @ 0x%016llx:",
-                        vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].rip);
+                        vcpu_get_rip(vcpu));
                 return -EINVAL;
         }

----------------------------------------------------------------

@@ -31,65 +31,6 @@
 #define INSTR_EMUL_WRAPPER_H
 #include <cpu.h>

-/**
- *
- * Identifiers for architecturally defined registers.
- *
- * These register names is used in condition statement.
- * Within the following groups,register name need to be
- * kept in order:
- * General register names group (CPU_REG_RAX~CPU_REG_R15);
- * Non general register names group (CPU_REG_CR0~CPU_REG_GDTR);
- * Segement register names group (CPU_REG_ES~CPU_REG_GS).
- */
-enum cpu_reg_name {
-        /* General purpose register layout should align with
-         * struct cpu_gp_regs
-         */
-        CPU_REG_RAX,
-        CPU_REG_RCX,
-        CPU_REG_RDX,
-        CPU_REG_RBX,
-        CPU_REG_RSP,
-        CPU_REG_RBP,
-        CPU_REG_RSI,
-        CPU_REG_RDI,
-        CPU_REG_R8,
-        CPU_REG_R9,
-        CPU_REG_R10,
-        CPU_REG_R11,
-        CPU_REG_R12,
-        CPU_REG_R13,
-        CPU_REG_R14,
-        CPU_REG_R15,
-        CPU_REG_CR0,
-        CPU_REG_CR2,
-        CPU_REG_CR3,
-        CPU_REG_CR4,
-        CPU_REG_DR7,
-        CPU_REG_RIP,
-        CPU_REG_RFLAGS,
-        /*CPU_REG_NATURAL_LAST*/
-        CPU_REG_EFER,
-        CPU_REG_PDPTE0,
-        CPU_REG_PDPTE1,
-        CPU_REG_PDPTE2,
-        CPU_REG_PDPTE3,
-        /*CPU_REG_64BIT_LAST,*/
-        CPU_REG_ES,
-        CPU_REG_CS,
-        CPU_REG_SS,
-        CPU_REG_DS,
-        CPU_REG_FS,
-        CPU_REG_GS,
-        CPU_REG_LDTR,
-        CPU_REG_TR,
-        CPU_REG_IDTR,
-        CPU_REG_GDTR
-        /*CPU_REG_LAST*/
-};
-
 /**
  * Define the following MACRO to make range checking clear.
  *

----------------------------------------------------------------

@@ -14,6 +14,141 @@ extern struct efi_ctx* efi_ctx;
 vm_sw_loader_t vm_sw_loader;

+inline uint64_t vcpu_get_gpreg(struct vcpu *vcpu, uint32_t reg)
+{
+        struct run_context *cur_context =
+                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
+
+        return cur_context->guest_cpu_regs.longs[reg];
+}
+
+inline void vcpu_set_gpreg(struct vcpu *vcpu, uint32_t reg, uint64_t val)
+{
+        struct run_context *cur_context =
+                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
+
+        cur_context->guest_cpu_regs.longs[reg] = val;
+}
+
+inline uint64_t vcpu_get_rip(struct vcpu *vcpu)
+{
+        if (bitmap_test(CPU_REG_RIP, &vcpu->reg_updated) == 0 &&
+                bitmap_test_and_set_lock(CPU_REG_RIP, &vcpu->reg_cached) == 0)
+                vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].rip =
+                        exec_vmread(VMX_GUEST_RIP);
+        return vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].rip;
+}
+
+inline void vcpu_set_rip(struct vcpu *vcpu, uint64_t val)
+{
+        vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].rip = val;
+        bitmap_set_lock(CPU_REG_RIP, &vcpu->reg_updated);
+}
+
+inline uint64_t vcpu_get_rsp(struct vcpu *vcpu)
+{
+        struct run_context *cur_context =
+                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
+
+        return cur_context->guest_cpu_regs.regs.rsp;
+}
+
+inline void vcpu_set_rsp(struct vcpu *vcpu, uint64_t val)
+{
+        struct run_context *cur_context =
+                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
+
+        cur_context->guest_cpu_regs.regs.rsp = val;
+        bitmap_set_lock(CPU_REG_RSP, &vcpu->reg_updated);
+}
+
+inline uint64_t vcpu_get_efer(struct vcpu *vcpu)
+{
+        if (bitmap_test(CPU_REG_EFER, &vcpu->reg_updated) == 0 &&
+                bitmap_test_and_set_lock(CPU_REG_EFER, &vcpu->reg_cached) == 0)
+                vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ia32_efer
+                        = exec_vmread64(VMX_GUEST_IA32_EFER_FULL);
+        return vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ia32_efer;
+}
+
+inline void vcpu_set_efer(struct vcpu *vcpu, uint64_t val)
+{
+        vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ia32_efer = val;
+        bitmap_set_lock(CPU_REG_EFER, &vcpu->reg_updated);
+}
+
+inline uint64_t vcpu_get_rflags(struct vcpu *vcpu)
+{
+        if (bitmap_test(CPU_REG_RFLAGS, &vcpu->reg_updated) == 0 &&
+                bitmap_test_and_set_lock(CPU_REG_RFLAGS,
+                        &vcpu->reg_cached) == 0 && vcpu->launched)
+                vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].rflags =
+                        exec_vmread(VMX_GUEST_RFLAGS);
+        return vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].rflags;
+}
+
+inline void vcpu_set_rflags(struct vcpu *vcpu, uint64_t val)
+{
+        vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].rflags = val;
+        bitmap_set_lock(CPU_REG_RFLAGS, &vcpu->reg_updated);
+}
+
+inline uint64_t vcpu_get_cr0(struct vcpu *vcpu)
+{
+        uint64_t mask;
+
+        if (bitmap_test_and_set_lock(CPU_REG_CR0, &vcpu->reg_cached) == 0) {
+                mask = exec_vmread(VMX_CR0_MASK);
+                vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].cr0 =
+                        (exec_vmread(VMX_CR0_READ_SHADOW) & mask) |
+                        (exec_vmread(VMX_GUEST_CR0) & (~mask));
+        }
+        return vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].cr0;
+}
+
+inline int vcpu_set_cr0(struct vcpu *vcpu, uint64_t val)
+{
+        return vmx_write_cr0(vcpu, val);
+}
+
+inline uint64_t vcpu_get_cr2(struct vcpu *vcpu)
+{
+        return vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].cr2;
+}
+
+inline void vcpu_set_cr2(struct vcpu *vcpu, uint64_t val)
+{
+        vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].cr2 = val;
+}
+
+inline uint64_t vcpu_get_cr4(struct vcpu *vcpu)
+{
+        uint64_t mask;
+
+        if (bitmap_test_and_set_lock(CPU_REG_CR4, &vcpu->reg_cached) == 0) {
+                mask = exec_vmread(VMX_CR4_MASK);
+                vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].cr4 =
+                        (exec_vmread(VMX_CR4_READ_SHADOW) & mask) |
+                        (exec_vmread(VMX_GUEST_CR4) & (~mask));
+        }
+        return vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].cr4;
+}
+
+inline int vcpu_set_cr4(struct vcpu *vcpu, uint64_t val)
+{
+        return vmx_write_cr4(vcpu, val);
+}
+
+inline uint64_t vcpu_get_pat_ext(struct vcpu *vcpu)
+{
+        return vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ia32_pat;
+}
+
+inline void vcpu_set_pat_ext(struct vcpu *vcpu, uint64_t val)
+{
+        vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ia32_pat = val;
+}
+
 struct vcpu *get_ever_run_vcpu(uint16_t pcpu_id)
 {
         return per_cpu(ever_run_vcpu, pcpu_id);
@@ -128,15 +263,12 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
 static void set_vcpu_mode(struct vcpu *vcpu, uint32_t cs_attr)
 {
-        struct run_context *cur_context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
-
-        if (cur_context->ia32_efer & MSR_IA32_EFER_LMA_BIT) {
+        if (vcpu_get_efer(vcpu) & MSR_IA32_EFER_LMA_BIT) {
                 if (cs_attr & 0x2000)        /* CS.L = 1 */
                         vcpu->arch_vcpu.cpu_mode = CPU_MODE_64BIT;
                 else
                         vcpu->arch_vcpu.cpu_mode = CPU_MODE_COMPATIBILITY;
-        } else if (cur_context->cr0 & CR0_PE) {
+        } else if (vcpu_get_cr0(vcpu) & CR0_PE) {
                 vcpu->arch_vcpu.cpu_mode = CPU_MODE_PROTECTED;
         } else {
                 vcpu->arch_vcpu.cpu_mode = CPU_MODE_REAL;
@@ -153,6 +285,17 @@ int start_vcpu(struct vcpu *vcpu)
         ASSERT(vcpu != NULL, "Incorrect arguments");

+        if (bitmap_test_and_clear_lock(CPU_REG_RIP, &vcpu->reg_updated))
+                exec_vmwrite(VMX_GUEST_RIP, cur_context->rip);
+        if (bitmap_test_and_clear_lock(CPU_REG_RSP, &vcpu->reg_updated))
+                exec_vmwrite(VMX_GUEST_RSP,
+                        cur_context->guest_cpu_regs.regs.rsp);
+        if (bitmap_test_and_clear_lock(CPU_REG_EFER, &vcpu->reg_updated))
+                exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL,
+                        cur_context->ia32_efer);
+        if (bitmap_test_and_clear_lock(CPU_REG_RFLAGS, &vcpu->reg_updated))
+                exec_vmwrite(VMX_GUEST_RFLAGS, cur_context->rflags);
+
         /* If this VCPU is not already launched, launch it */
         if (!vcpu->launched) {
                 pr_info("VM %d Starting VCPU %hu",
@@ -193,27 +336,22 @@ int start_vcpu(struct vcpu *vcpu)
                  * instruction needs to be repeated and resume VCPU accordingly
                  */
                 instlen = vcpu->arch_vcpu.inst_len;
-                rip = cur_context->rip;
-                exec_vmwrite(VMX_GUEST_RIP, ((rip +(uint64_t)instlen) &
+                rip = vcpu_get_rip(vcpu);
+                exec_vmwrite(VMX_GUEST_RIP, ((rip+(uint64_t)instlen) &
                         0xFFFFFFFFFFFFFFFFUL));

                 /* Resume the VM */
                 status = vmx_vmrun(cur_context, VM_RESUME, ibrs_type);
         }

-        /* Save guest CR3 register */
-        cur_context->cr3 = exec_vmread(VMX_GUEST_CR3);
-
-        /* Save guest IA32_EFER register */
-        cur_context->ia32_efer = exec_vmread64(VMX_GUEST_IA32_EFER_FULL);
+        vcpu->reg_cached = 0UL;
         set_vcpu_mode(vcpu, exec_vmread32(VMX_GUEST_CS_ATTR));

-        /* Obtain current VCPU instruction pointer and length */
-        cur_context->rip = exec_vmread(VMX_GUEST_RIP);
+        /* Obtain current VCPU instruction length */
         vcpu->arch_vcpu.inst_len = exec_vmread32(VMX_EXIT_INSTR_LEN);

         cur_context->guest_cpu_regs.regs.rsp = exec_vmread(VMX_GUEST_RSP);
-        cur_context->rflags = exec_vmread(VMX_GUEST_RFLAGS);

         /* Obtain VM exit reason */
         vcpu->arch_vcpu.exit_reason = exec_vmread32(VMX_EXIT_REASON);

----------------------------------------------------------------

@@ -427,7 +427,7 @@ static void vlapic_set_tsc_deadline_msr(struct acrn_vlapic *vlapic,
         struct vcpu_arch *arch = &vlapic->vcpu->arch_vcpu;

         /* transfer guest tsc to host tsc */
-        val -= arch->contexts[arch->cur_context].tsc_offset;
+        val -= exec_vmread64(VMX_TSC_OFFSET_FULL);
         timer->fire_tsc = val;

         add_timer(timer);

----------------------------------------------------------------

@@ -16,14 +16,12 @@ int vmcall_vmexit_handler(struct vcpu *vcpu)
 {
         int32_t ret = -EACCES;
         struct vm *vm = vcpu->vm;
-        struct run_context *cur_context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
         /* hypercall ID from guest*/
-        uint64_t hypcall_id = cur_context->guest_cpu_regs.regs.r8;
+        uint64_t hypcall_id = vcpu_get_gpreg(vcpu, CPU_REG_R8);
         /* hypercall param1 from guest*/
-        uint64_t param1 = cur_context->guest_cpu_regs.regs.rdi;
+        uint64_t param1 = vcpu_get_gpreg(vcpu, CPU_REG_RDI);
         /* hypercall param2 from guest*/
-        uint64_t param2 = cur_context->guest_cpu_regs.regs.rsi;
+        uint64_t param2 = vcpu_get_gpreg(vcpu, CPU_REG_RSI);

         if (!is_hypercall_from_ring0()) {
                 pr_err("hypercall is only allowed from RING-0!\n");
@@ -179,7 +177,7 @@ int vmcall_vmexit_handler(struct vcpu *vcpu)
         }

 out:
-        cur_context->guest_cpu_regs.regs.rax = (uint64_t)ret;
+        vcpu_set_gpreg(vcpu, CPU_REG_RAX, (uint64_t)ret);

         TRACE_2L(TRACE_VMEXIT_VMCALL, vm->vm_id, hypcall_id);

----------------------------------------------------------------

@@ -147,10 +147,9 @@ int rdmsr_vmexit_handler(struct vcpu *vcpu)
         int err = 0;
         uint32_t msr;
         uint64_t v = 0UL;
-        int cur_context = vcpu->arch_vcpu.cur_context;

         /* Read the msr value */
-        msr = vcpu->arch_vcpu.contexts[cur_context].guest_cpu_regs.regs.rcx;
+        msr = vcpu_get_gpreg(vcpu, CPU_REG_RCX);

         /* Do the required processing for each msr case */
         switch (msr) {
@@ -162,7 +161,7 @@ int rdmsr_vmexit_handler(struct vcpu *vcpu)
         case MSR_IA32_TIME_STAMP_COUNTER:
         {
                 /* Add the TSC_offset to host TSC to get guest TSC */
-                v = rdtsc() + vcpu->arch_vcpu.contexts[cur_context].tsc_offset;
+                v = rdtsc() + exec_vmread64(VMX_TSC_OFFSET_FULL);
                 break;
         }
         case MSR_IA32_MTRR_CAP:
@@ -244,10 +243,8 @@ int rdmsr_vmexit_handler(struct vcpu *vcpu)
         }

         /* Store the MSR contents in RAX and RDX */
-        vcpu->arch_vcpu.contexts[cur_context].guest_cpu_regs.regs.rax =
-                v & 0xffffffffU;
-        vcpu->arch_vcpu.contexts[cur_context].guest_cpu_regs.regs.rdx =
-                v >> 32U;
+        vcpu_set_gpreg(vcpu, CPU_REG_RAX, v & 0xffffffffU);
+        vcpu_set_gpreg(vcpu, CPU_REG_RDX, v >> 32U);

         TRACE_2L(TRACE_VMEXIT_RDMSR, msr, v);
@@ -259,15 +256,13 @@ int wrmsr_vmexit_handler(struct vcpu *vcpu)
         int err = 0;
         uint32_t msr;
         uint64_t v;
-        struct run_context *cur_context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];

         /* Read the MSR ID */
-        msr = (uint32_t)cur_context->guest_cpu_regs.regs.rcx;
+        msr = (uint32_t)vcpu_get_gpreg(vcpu, CPU_REG_RCX);

         /* Get the MSR contents */
-        v = (cur_context->guest_cpu_regs.regs.rdx << 32U) |
-                cur_context->guest_cpu_regs.regs.rax;
+        v = (vcpu_get_gpreg(vcpu, CPU_REG_RDX) << 32U) |
+                vcpu_get_gpreg(vcpu, CPU_REG_RAX);

         /* Do the required processing for each msr case */
         switch (msr) {
@@ -279,8 +274,7 @@ int wrmsr_vmexit_handler(struct vcpu *vcpu)
         case MSR_IA32_TIME_STAMP_COUNTER:
         {
                 /*Caculate TSC offset from changed TSC MSR value*/
-                cur_context->tsc_offset = v - rdtsc();
-                exec_vmwrite64(VMX_TSC_OFFSET_FULL, cur_context->tsc_offset);
+                exec_vmwrite64(VMX_TSC_OFFSET_FULL, v - rdtsc());
                 break;
         }

----------------------------------------------------------------

@@ -29,13 +29,10 @@ emulate_pio_post(struct vcpu *vcpu, struct io_request *io_req)
                 if (pio_req->direction == REQUEST_READ) {
                         uint64_t value = (uint64_t)pio_req->value;
-                        int32_t context_idx = vcpu->arch_vcpu.cur_context;
-                        struct run_context *cur_context;
-                        uint64_t *rax;
+                        uint64_t rax = vcpu_get_gpreg(vcpu, CPU_REG_RAX);

-                        cur_context = &vcpu->arch_vcpu.contexts[context_idx];
-                        rax = &cur_context->guest_cpu_regs.regs.rax;
-                        *rax = ((*rax) & ~mask) | (value & mask);
+                        rax = ((rax) & ~mask) | (value & mask);
+                        vcpu_set_gpreg(vcpu, CPU_REG_RAX, rax);
                 }
                 status = 0;
         } else {
@@ -323,14 +320,10 @@ int32_t pio_instr_vmexit_handler(struct vcpu *vcpu)
         int32_t status;
         uint64_t exit_qual;
-        int32_t cur_context_idx = vcpu->arch_vcpu.cur_context;
-        struct run_context *cur_context;
-        struct cpu_gp_regs *regs;
         struct io_request *io_req = &vcpu->req;
         struct pio_request *pio_req = &io_req->reqs.pio;

         exit_qual = vcpu->arch_vcpu.exit_qualification;
-        cur_context = &vcpu->arch_vcpu.contexts[cur_context_idx];
-        regs = &cur_context->guest_cpu_regs.regs;

         io_req->type = REQ_PORTIO;
         io_req->processed = REQ_STATE_PENDING;
@@ -338,7 +331,7 @@ int32_t pio_instr_vmexit_handler(struct vcpu *vcpu)
         pio_req->address = VM_EXIT_IO_INSTRUCTION_PORT_NUMBER(exit_qual);
         if (VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual) == 0UL) {
                 pio_req->direction = REQUEST_WRITE;
-                pio_req->value = (uint32_t)regs->rax;
+                pio_req->value = (uint32_t)vcpu_get_gpreg(vcpu, CPU_REG_RAX);
         } else {
                 pio_req->direction = REQUEST_READ;
         }

----------------------------------------------------------------

@@ -52,13 +52,11 @@ static const uint16_t exception_type[32] = {
 static bool is_guest_irq_enabled(struct vcpu *vcpu)
 {
-        struct run_context *cur_context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
         uint64_t guest_rflags, guest_state;
         bool status = false;

         /* Read the RFLAGS of the guest */
-        guest_rflags = cur_context->rflags;
+        guest_rflags = vcpu_get_rflags(vcpu);

         /* Check the RFLAGS[IF] bit first */
         if ((guest_rflags & HV_ARCH_VCPU_RFLAGS_IF) != 0UL) {
                 /* Interrupts are allowed */
@@ -302,10 +300,7 @@ void vcpu_inject_gp(struct vcpu *vcpu, uint32_t err_code)
 void vcpu_inject_pf(struct vcpu *vcpu, uint64_t addr, uint32_t err_code)
 {
-        struct run_context *cur_context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
-
-        cur_context->cr2 = addr;
+        vcpu_set_cr2(vcpu, addr);
         vcpu_queue_exception(vcpu, IDT_PF, err_code);
         vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
 }

----------------------------------------------------------------

@@ -243,14 +243,18 @@ static int unhandled_vmexit_handler(struct vcpu *vcpu)
 int cpuid_vmexit_handler(struct vcpu *vcpu)
 {
-        struct run_context *cur_context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
-
-        guest_cpuid(vcpu,
-                (uint32_t *)&cur_context->guest_cpu_regs.regs.rax,
-                (uint32_t *)&cur_context->guest_cpu_regs.regs.rbx,
-                (uint32_t *)&cur_context->guest_cpu_regs.regs.rcx,
-                (uint32_t *)&cur_context->guest_cpu_regs.regs.rdx);
+        uint64_t rax, rbx, rcx, rdx;
+
+        rax = vcpu_get_gpreg(vcpu, CPU_REG_RAX);
+        rbx = vcpu_get_gpreg(vcpu, CPU_REG_RBX);
+        rcx = vcpu_get_gpreg(vcpu, CPU_REG_RCX);
+        rdx = vcpu_get_gpreg(vcpu, CPU_REG_RDX);
+        guest_cpuid(vcpu, (uint32_t *)&rax, (uint32_t *)&rbx,
+                (uint32_t *)&rcx, (uint32_t *)&rdx);
+        vcpu_set_gpreg(vcpu, CPU_REG_RAX, rax);
+        vcpu_set_gpreg(vcpu, CPU_REG_RBX, rbx);
+        vcpu_set_gpreg(vcpu, CPU_REG_RCX, rcx);
+        vcpu_set_gpreg(vcpu, CPU_REG_RDX, rdx);

         TRACE_2L(TRACE_VMEXIT_CPUID, (uint64_t)vcpu->vcpu_id, 0UL);
@@ -260,24 +264,22 @@ int cpuid_vmexit_handler(struct vcpu *vcpu)
 int cr_access_vmexit_handler(struct vcpu *vcpu)
 {
         int err = 0;
-        uint64_t *regptr;
-        struct run_context *cur_context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
+        uint64_t reg;
         int idx = VM_EXIT_CR_ACCESS_REG_IDX(vcpu->arch_vcpu.exit_qualification);

         ASSERT(idx>=0 && idx<=15, "index out of range");
-        regptr = cur_context->guest_cpu_regs.longs + idx;
+        reg = vcpu_get_gpreg(vcpu, idx);

         switch ((VM_EXIT_CR_ACCESS_ACCESS_TYPE
                 (vcpu->arch_vcpu.exit_qualification) << 4) |
                 VM_EXIT_CR_ACCESS_CR_NUM(vcpu->arch_vcpu.exit_qualification)) {
         case 0x00U:
                 /* mov to cr0 */
-                err = vmx_write_cr0(vcpu, *regptr);
+                err = vcpu_set_cr0(vcpu, reg);
                 break;
         case 0x04U:
                 /* mov to cr4 */
-                err = vmx_write_cr4(vcpu, *regptr);
+                err = vcpu_set_cr4(vcpu, reg);
                 break;
         case 0x08U:
@@ -285,16 +287,17 @@ int cr_access_vmexit_handler(struct vcpu *vcpu)
                  *
                  * set reserved bit in CR8 causes GP to guest
                  */
-                if (*regptr & ~0xFUL) {
+                if (reg & ~0xFUL) {
                         pr_dbg("Invalid cr8 write operation from guest");
                         vcpu_inject_gp(vcpu, 0U);
                         break;
                 }
-                vlapic_set_cr8(vcpu->arch_vcpu.vlapic, *regptr);
+                vlapic_set_cr8(vcpu->arch_vcpu.vlapic, reg);
                 break;
         case 0x18U:
                 /* mov from cr8 */
-                *regptr = vlapic_get_cr8(vcpu->arch_vcpu.vlapic);
+                reg = vlapic_get_cr8(vcpu->arch_vcpu.vlapic);
+                vcpu_set_gpreg(vcpu, idx, reg);
                 break;
         default:
                 panic("Unhandled CR access");
@@ -318,7 +321,6 @@ static int xsetbv_vmexit_handler(struct vcpu *vcpu)
 {
         int idx;
         uint64_t val64;
-        struct run_context *ctx_ptr;

         val64 = exec_vmread(VMX_GUEST_CR4);
         if ((val64 & CR4_OSXSAVE) == 0UL) {
@@ -331,16 +333,14 @@ static int xsetbv_vmexit_handler(struct vcpu *vcpu)
                 return -1;
         }

-        ctx_ptr = &(vcpu->arch_vcpu.contexts[idx]);
-
         /*to access XCR0,'rcx' should be 0*/
-        if (ctx_ptr->guest_cpu_regs.regs.rcx != 0UL) {
+        if (vcpu_get_gpreg(vcpu, CPU_REG_RCX) != 0UL) {
                 vcpu_inject_gp(vcpu, 0U);
                 return 0;
         }

-        val64 = ((ctx_ptr->guest_cpu_regs.regs.rax) & 0xffffffffUL) |
-                (ctx_ptr->guest_cpu_regs.regs.rdx << 32U);
+        val64 = (vcpu_get_gpreg(vcpu, CPU_REG_RAX) & 0xffffffffUL) |
+                (vcpu_get_gpreg(vcpu, CPU_REG_RDX) << 32U);

         /*bit 0(x87 state) of XCR0 can't be cleared*/
         if ((val64 & 0x01UL) == 0UL) {

----------------------------------------------------------------

@@ -314,23 +314,18 @@ static void init_cr0_cr4_host_mask(__unused struct vcpu *vcpu)
 uint64_t vmx_rdmsr_pat(struct vcpu *vcpu)
 {
-        struct run_context *context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
-
         /*
          * note: if context->cr0.CD is set, the actual value in guest's
          * IA32_PAT MSR is PAT_ALL_UC_VALUE, which may be different from
          * the saved value context->ia32_pat
          */
-        return context->ia32_pat;
+        return vcpu_get_pat_ext(vcpu);
 }

 int vmx_wrmsr_pat(struct vcpu *vcpu, uint64_t value)
 {
         uint32_t i;
         uint64_t field;
-        struct run_context *context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];

         for (i = 0U; i < 8U; i++) {
                 field = (value >> (i * 8U)) & 0xffUL;
@@ -342,23 +337,21 @@ int vmx_wrmsr_pat(struct vcpu *vcpu, uint64_t value)
                 }
         }

-        context->ia32_pat = value;
+        vcpu_set_pat_ext(vcpu, value);

         /*
          * If context->cr0.CD is set, we defer any further requests to write
          * guest's IA32_PAT, until the time when guest's CR0.CD is being cleared
          */
-        if ((context->cr0 & CR0_CD) == 0UL) {
+        if ((vcpu_get_cr0(vcpu) & CR0_CD) == 0UL) {
                 exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, value);
         }
         return 0;
 }

 static bool is_cr0_write_valid(struct vcpu *vcpu, uint64_t cr0)
 {
-        struct run_context *context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
-
         /* Shouldn't set always off bit */
         if ((cr0 & cr0_always_off_mask) != 0UL)
                 return false;
@@ -370,8 +363,8 @@ static bool is_cr0_write_valid(struct vcpu *vcpu, uint64_t cr0)
          * CR0.PG = 1, CR4.PAE = 0 and IA32_EFER.LME = 1 is invalid.
          * CR0.PE = 0 and CR0.PG = 1 is invalid.
          */
-        if (((cr0 & CR0_PG) != 0UL) && ((context->cr4 & CR4_PAE) == 0UL) &&
-                ((context->ia32_efer & MSR_IA32_EFER_LME_BIT) != 0UL))
+        if (((cr0 & CR0_PG) != 0UL) && ((vcpu_get_cr4(vcpu) & CR4_PAE) == 0UL)
+                && ((vcpu_get_efer(vcpu) & MSR_IA32_EFER_LME_BIT) != 0UL))
                 return false;

         if (((cr0 & CR0_PE) == 0UL) && ((cr0 & CR0_PG) != 0UL))
@@ -411,11 +404,9 @@ static bool is_cr0_write_valid(struct vcpu *vcpu, uint64_t cr0)
  */
 int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
 {
-        struct run_context *context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
         uint64_t cr0_vmx;
         uint32_t entry_ctrls;
-        bool paging_enabled = !!(context->cr0 & CR0_PG);
+        bool paging_enabled = !!(vcpu_get_cr0(vcpu) & CR0_PG);

         if (!is_cr0_write_valid(vcpu, cr0)) {
                 pr_dbg("Invalid cr0 write operation from guest");
@@ -427,9 +418,10 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
          * When loading a control register, reserved bit should always set
          * to the value previously read.
          */
-        cr0 = (cr0 & ~CR0_RESERVED_MASK) | (context->cr0 & CR0_RESERVED_MASK);
+        cr0 = (cr0 & ~CR0_RESERVED_MASK) |
+                (vcpu_get_cr0(vcpu) & CR0_RESERVED_MASK);

-        if (((context->ia32_efer & MSR_IA32_EFER_LME_BIT) != 0UL) &&
+        if (((vcpu_get_efer(vcpu) & MSR_IA32_EFER_LME_BIT) != 0UL) &&
                 !paging_enabled && ((cr0 & CR0_PG) != 0UL)) {
                 /* Enable long mode */
                 pr_dbg("VMM: Enable long mode");
@@ -437,9 +429,9 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
                 entry_ctrls |= VMX_ENTRY_CTLS_IA32E_MODE;
                 exec_vmwrite32(VMX_ENTRY_CONTROLS, entry_ctrls);

-                context->ia32_efer |= MSR_IA32_EFER_LMA_BIT;
-                exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, context->ia32_efer);
-        } else if (((context->ia32_efer & MSR_IA32_EFER_LME_BIT) != 0UL) &&
+                vcpu_set_efer(vcpu,
+                        vcpu_get_efer(vcpu) | MSR_IA32_EFER_LMA_BIT);
+        } else if (((vcpu_get_efer(vcpu) & MSR_IA32_EFER_LME_BIT) != 0UL) &&
                 paging_enabled && ((cr0 & CR0_PG) == 0UL)){
                 /* Disable long mode */
                 pr_dbg("VMM: Disable long mode");
@@ -447,16 +439,16 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
                 entry_ctrls &= ~VMX_ENTRY_CTLS_IA32E_MODE;
                 exec_vmwrite32(VMX_ENTRY_CONTROLS, entry_ctrls);

-                context->ia32_efer &= ~MSR_IA32_EFER_LMA_BIT;
-                exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, context->ia32_efer);
+                vcpu_set_efer(vcpu,
+                        vcpu_get_efer(vcpu) & ~MSR_IA32_EFER_LMA_BIT);
         } else {
                 /* CR0.PG unchanged. */
         }

         /* If CR0.CD or CR0.NW get changed */
-        if (((context->cr0 ^ cr0) & (CR0_CD | CR0_NW)) != 0UL) {
+        if (((vcpu_get_cr0(vcpu) ^ cr0) & (CR0_CD | CR0_NW)) != 0UL) {
                 /* No action if only CR0.NW is changed */
-                if (((context->cr0 ^ cr0) & CR0_CD) != 0UL) {
+                if (((vcpu_get_cr0(vcpu) ^ cr0) & CR0_CD) != 0UL) {
                         if ((cr0 & CR0_CD) != 0UL) {
                                 /*
                                  * When the guest requests to set CR0.CD, we don't allow
@@ -468,7 +460,8 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
                                 CACHE_FLUSH_INVALIDATE_ALL();
                         } else {
                                 /* Restore IA32_PAT to enable cache again */
-                                exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, context->ia32_pat);
+                                exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL,
+                                        vcpu_get_pat_ext(vcpu));
                         }
                         vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
                 }
@@ -483,7 +476,9 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
         cr0_vmx &= ~(CR0_CD | CR0_NW);
         exec_vmwrite(VMX_GUEST_CR0, cr0_vmx & 0xFFFFFFFFUL);
         exec_vmwrite(VMX_CR0_READ_SHADOW, cr0 & 0xFFFFFFFFUL);
-        context->cr0 = cr0;
+
+        /* clear read cache, next time read should from VMCS */
+        bitmap_clear_lock(CPU_REG_CR0, &vcpu->reg_cached);

         pr_dbg("VMM: Try to write %016llx, allow to write 0x%016llx to CR0",
                 cr0, cr0_vmx);
@@ -491,19 +486,6 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
         return 0;
 }

-int vmx_write_cr3(struct vcpu *vcpu, uint64_t cr3)
-{
-        struct run_context *context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
-
-        /* Write to guest's CR3 */
-        context->cr3 = cr3;
-
-        /* Commit new value to VMCS */
-        exec_vmwrite(VMX_GUEST_CR3, cr3);
-
-        return 0;
-}
-
 static bool is_cr4_write_valid(struct vcpu *vcpu, uint64_t cr4)
 {
         /* Check if guest try to set fixed to 0 bits or reserved bits */
@@ -558,8 +540,6 @@ static bool is_cr4_write_valid(struct vcpu *vcpu, uint64_t cr4)
  */
 int vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4)
 {
-        struct run_context *context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
         uint64_t cr4_vmx;

         if (!is_cr4_write_valid(vcpu, cr4)) {
@@ -572,7 +552,9 @@ int vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4)
         cr4_vmx = cr4_always_on_mask | cr4;
         exec_vmwrite(VMX_GUEST_CR4, cr4_vmx & 0xFFFFFFFFUL);
         exec_vmwrite(VMX_CR4_READ_SHADOW, cr4 & 0xFFFFFFFFUL);
-        context->cr4 = cr4;
+
+        /* clear read cache, next time read should from VMCS */
+        bitmap_clear_lock(CPU_REG_CR4, &vcpu->reg_cached);

         pr_dbg("VMM: Try to write %016llx, allow to write 0x%016llx to CR4",
                 cr4, cr4_vmx);
@@ -583,7 +565,6 @@ int vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4)
 static void init_guest_state(struct vcpu *vcpu)
 {
         uint32_t field;
-        uint64_t value;
         uint16_t value16;
         uint32_t value32;
         uint64_t value64;
@@ -594,8 +575,6 @@ static void init_guest_state(struct vcpu *vcpu)
         uint16_t es = 0U, ss = 0U, ds = 0U, fs = 0U, gs = 0U, data32_idx;
         uint16_t tr_sel = 0x70U;
         struct vm *vm = vcpu->vm;
-        struct run_context *cur_context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
         enum vm_cpu_mode vcpu_mode = get_vcpu_mode(vcpu);

         pr_dbg("*********************");
@@ -613,7 +592,7 @@ static void init_guest_state(struct vcpu *vcpu)
         pr_dbg("Natural-width********");

         if (vcpu_mode == CPU_MODE_64BIT) {
-                cur_context->ia32_efer = MSR_IA32_EFER_LME_BIT;
+                vcpu_set_efer(vcpu, MSR_IA32_EFER_LME_BIT);
         }

         /* Setup guest control register values
@@ -621,17 +600,18 @@ static void init_guest_state(struct vcpu *vcpu)
          * checked.
          */
         if (vcpu_mode == CPU_MODE_REAL) {
-                vmx_write_cr4(vcpu, 0UL);
-                vmx_write_cr3(vcpu, 0UL);
-                vmx_write_cr0(vcpu, CR0_ET | CR0_NE);
+                vcpu_set_cr4(vcpu, 0UL);
+                exec_vmwrite(VMX_GUEST_CR3, 0UL);
+                vcpu_set_cr0(vcpu, CR0_ET | CR0_NE);
         } else if (vcpu_mode == CPU_MODE_PROTECTED) {
-                vmx_write_cr4(vcpu, 0UL);
-                vmx_write_cr3(vcpu, 0UL);
-                vmx_write_cr0(vcpu, CR0_ET | CR0_NE | CR0_PE);
+                vcpu_set_cr4(vcpu, 0UL);
+                exec_vmwrite(VMX_GUEST_CR3, 0UL);
+                vcpu_set_cr0(vcpu, CR0_ET | CR0_NE | CR0_PE);
         } else if (vcpu_mode == CPU_MODE_64BIT) {
-                vmx_write_cr4(vcpu, CR4_PSE | CR4_PAE | CR4_MCE);
-                vmx_write_cr3(vcpu, vm->arch_vm.guest_init_pml4 | CR3_PWT);
-                vmx_write_cr0(vcpu, CR0_PG | CR0_PE | CR0_NE);
+                vcpu_set_cr4(vcpu, CR4_PSE | CR4_PAE | CR4_MCE);
+                exec_vmwrite(VMX_GUEST_CR3,
+                        vm->arch_vm.guest_init_pml4 | CR3_PWT);
+                vcpu_set_cr0(vcpu, CR0_PG | CR0_PE | CR0_NE);
         } else {
                 /* vcpu_mode will never be CPU_MODE_COMPATIBILITY */
         }
@@ -639,10 +619,8 @@ static void init_guest_state(struct vcpu *vcpu)
         /***************************************************/
         /* Set up Flags - the value of RFLAGS on VM entry */
         /***************************************************/
-        field = VMX_GUEST_RFLAGS;
-        cur_context->rflags = 0x2UL; /* Bit 1 is a active high reserved bit */
-        exec_vmwrite(field, cur_context->rflags);
-        pr_dbg("VMX_GUEST_RFLAGS: 0x%016llx ", cur_context->rflags);
+        vcpu_set_rflags(vcpu, 0x2UL); /* Bit 1 is a active high reserved bit */
+        pr_dbg("VMX_GUEST_RFLAGS: 0x%016llx ", vcpu_get_rflags(vcpu));

         /***************************************************/
         /* Set Code Segment - CS */
@@ -1023,10 +1001,9 @@ static void init_guest_state(struct vcpu *vcpu)
                 value32);

         value64 = PAT_POWER_ON_VALUE;
-        cur_context->ia32_pat = value64;
         exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, value64);
-        pr_dbg("VMX_GUEST_IA32_PAT: 0x%016llx ",
-                value64);
+        pr_dbg("VMX_GUEST_IA32_PAT: 0x%016llx ", value64);
+        vcpu_set_pat_ext(vcpu, value64);

         value64 = 0UL;
         exec_vmwrite64(VMX_GUEST_IA32_DEBUGCTL_FULL, value64);
@@ -1561,17 +1538,15 @@ static void init_exit_ctrl(__unused struct vcpu *vcpu)
 static void override_uefi_vmcs(struct vcpu *vcpu)
 {
         uint32_t field;
-        struct run_context *cur_context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];

         if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
                 /* CR4 should be set before CR0, because when set CR0, CR4 value
                  * will be checked. */
                 /* VMXE is always on bit when set CR4, and not allowed to be set
                  * from input cr4 value */
-                vmx_write_cr4(vcpu, efi_ctx->cr4 & ~CR4_VMXE);
-                vmx_write_cr3(vcpu, efi_ctx->cr3);
-                vmx_write_cr0(vcpu, efi_ctx->cr0 | CR0_PG | CR0_PE | CR0_NE);
+                vcpu_set_cr4(vcpu, efi_ctx->cr4 & ~CR4_VMXE);
+                exec_vmwrite(VMX_GUEST_CR3, efi_ctx->cr3);
+                vcpu_set_cr0(vcpu, efi_ctx->cr0 | CR0_PG | CR0_PE | CR0_NE);

                 /* Selector */
                 field = VMX_GUEST_CS_SEL;
@@ -1642,11 +1617,9 @@ static void override_uefi_vmcs(struct vcpu *vcpu)
         }

         /* Interrupt */
-        field = VMX_GUEST_RFLAGS;
         /* clear flags for CF/PF/AF/ZF/SF/OF */
-        cur_context->rflags = efi_ctx->rflags & ~(0x8d5UL);
-        exec_vmwrite(field, cur_context->rflags);
-        pr_dbg("VMX_GUEST_RFLAGS: 0x%016llx ", cur_context->rflags);
+        vcpu_set_rflags(vcpu, efi_ctx->rflags & ~(0x8d5UL));
+        pr_dbg("VMX_GUEST_RFLAGS: 0x%016llx ", vcpu_get_rflags(vcpu));
 }
 #endif

----------------------------------------------------------------

@@ -54,8 +54,6 @@ void efi_spurious_handler(int vector)
 int uefi_sw_loader(struct vm *vm, struct vcpu *vcpu)
 {
         int ret = 0;
-        struct run_context *cur_context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];

         ASSERT(vm != NULL, "Incorrect argument");
@@ -67,21 +65,21 @@ int uefi_sw_loader(struct vm *vm, struct vcpu *vcpu)
         vlapic_restore(vcpu->arch_vcpu.vlapic, &uefi_lapic_regs);

         vcpu->entry_addr = (void *)efi_ctx->rip;
-        cur_context->guest_cpu_regs.regs.rax = efi_ctx->rax;
-        cur_context->guest_cpu_regs.regs.rbx = efi_ctx->rbx;
-        cur_context->guest_cpu_regs.regs.rdx = efi_ctx->rcx;
-        cur_context->guest_cpu_regs.regs.rcx = efi_ctx->rdx;
-        cur_context->guest_cpu_regs.regs.rdi = efi_ctx->rdi;
-        cur_context->guest_cpu_regs.regs.rsi = efi_ctx->rsi;
-        cur_context->guest_cpu_regs.regs.rbp = efi_ctx->rbp;
-        cur_context->guest_cpu_regs.regs.r8 = efi_ctx->r8;
-        cur_context->guest_cpu_regs.regs.r9 = efi_ctx->r9;
-        cur_context->guest_cpu_regs.regs.r10 = efi_ctx->r10;
-        cur_context->guest_cpu_regs.regs.r11 = efi_ctx->r11;
-        cur_context->guest_cpu_regs.regs.r12 = efi_ctx->r12;
-        cur_context->guest_cpu_regs.regs.r13 = efi_ctx->r13;
-        cur_context->guest_cpu_regs.regs.r14 = efi_ctx->r14;
-        cur_context->guest_cpu_regs.regs.r15 = efi_ctx->r15;
+        vcpu_set_gpreg(vcpu, CPU_REG_RAX, efi_ctx->rax);
+        vcpu_set_gpreg(vcpu, CPU_REG_RBX, efi_ctx->rbx);
+        vcpu_set_gpreg(vcpu, CPU_REG_RCX, efi_ctx->rcx);
+        vcpu_set_gpreg(vcpu, CPU_REG_RDX, efi_ctx->rdx);
+        vcpu_set_gpreg(vcpu, CPU_REG_RDI, efi_ctx->rdi);
+        vcpu_set_gpreg(vcpu, CPU_REG_RSI, efi_ctx->rsi);
+        vcpu_set_gpreg(vcpu, CPU_REG_RBP, efi_ctx->rbp);
+        vcpu_set_gpreg(vcpu, CPU_REG_R8, efi_ctx->r8);
+        vcpu_set_gpreg(vcpu, CPU_REG_R9, efi_ctx->r9);
+        vcpu_set_gpreg(vcpu, CPU_REG_R10, efi_ctx->r10);
+        vcpu_set_gpreg(vcpu, CPU_REG_R11, efi_ctx->r11);
+        vcpu_set_gpreg(vcpu, CPU_REG_R12, efi_ctx->r12);
+        vcpu_set_gpreg(vcpu, CPU_REG_R13, efi_ctx->r13);
+        vcpu_set_gpreg(vcpu, CPU_REG_R14, efi_ctx->r14);
+        vcpu_set_gpreg(vcpu, CPU_REG_R15, efi_ctx->r15);

         /* defer irq enabling till vlapic is ready */
         CPU_IRQ_ENABLE();

----------------------------------------------------------------

@@ -102,8 +102,7 @@ void vcpu_thread(struct vcpu *vcpu)
                 basic_exit_reason = vcpu->arch_vcpu.exit_reason & 0xFFFFU;
                 per_cpu(vmexit_cnt, vcpu->pcpu_id)[basic_exit_reason]++;

-                TRACE_2L(TRACE_VM_EXIT, basic_exit_reason,
-                        vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].rip);
+                TRACE_2L(TRACE_VM_EXIT, basic_exit_reason, vcpu_get_rip(vcpu));
         } while (1);
 }

----------------------------------------------------------------

@@ -73,29 +73,27 @@ static uint64_t create_zero_page(struct vm *vm)
 int load_guest(struct vm *vm, struct vcpu *vcpu)
 {
         int32_t ret = 0;
+        uint32_t i;
         void *hva;
-        struct run_context *cur_context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
         uint64_t lowmem_gpa_top;

         hva = GPA2HVA(vm, GUEST_CFG_OFFSET);
         lowmem_gpa_top = *(uint64_t *)hva;

         /* hardcode vcpu entry addr(kernel entry) & rsi (zeropage)*/
-        (void)memset((void*)cur_context->guest_cpu_regs.longs,
-                0U, sizeof(uint64_t)*NUM_GPRS);
+        for (i = 0; i < NUM_GPRS; i++)
+                vcpu_set_gpreg(vcpu, i, 0UL);

         hva = GPA2HVA(vm, lowmem_gpa_top -
                 MEM_4K - MEM_2K);
         vcpu->entry_addr = (void *)(*((uint64_t *)hva));
-        cur_context->guest_cpu_regs.regs.rsi =
-                lowmem_gpa_top - MEM_4K;
+        vcpu_set_gpreg(vcpu, CPU_REG_RSI, lowmem_gpa_top - MEM_4K);

         pr_info("%s, Set config according to predefined offset:",
                 __func__);
         pr_info("VCPU%hu Entry: 0x%llx, RSI: 0x%016llx, cr3: 0x%016llx",
                 vcpu->vcpu_id, vcpu->entry_addr,
-                cur_context->guest_cpu_regs.regs.rsi,
+                vcpu_get_gpreg(vcpu, CPU_REG_RSI),
                 vm->arch_vm.guest_init_pml4);

         return ret;
@@ -105,8 +103,6 @@ int general_sw_loader(struct vm *vm, struct vcpu *vcpu)
 {
         int32_t ret = 0;
         void *hva;
-        struct run_context *cur_context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
         char dyn_bootargs[100] = {0};
         uint32_t kernel_entry_offset;
         struct zero_page *zeropage;
@@ -150,11 +146,13 @@ int general_sw_loader(struct vm *vm, struct vcpu *vcpu)
         /* See if guest is a Linux guest */
         if (vm->sw.kernel_type == VM_LINUX_GUEST) {
+                uint32_t i;
+
                 /* Documentation states: ebx=0, edi=0, ebp=0, esi=ptr to
                  * zeropage
                  */
-                (void)memset(cur_context->guest_cpu_regs.longs,
-                        0U, sizeof(uint64_t) * NUM_GPRS);
+                for (i = 0; i < NUM_GPRS; i++)
+                        vcpu_set_gpreg(vcpu, i, 0UL);

                 /* Get host-physical address for guest bootargs */
                 hva = GPA2HVA(vm,
@@ -217,11 +215,11 @@ int general_sw_loader(struct vm *vm, struct vcpu *vcpu)
                 /* Create Zeropage and copy Physical Base Address of Zeropage
                  * in RSI
                  */
-                cur_context->guest_cpu_regs.regs.rsi = create_zero_page(vm);
+                vcpu_set_gpreg(vcpu, CPU_REG_RSI, create_zero_page(vm));

                 pr_info("%s, RSI pointing to zero page for VM %d at GPA %X",
                         __func__, vm->vm_id,
-                        cur_context->guest_cpu_regs.regs.rsi);
+                        vcpu_get_gpreg(vcpu, CPU_REG_RSI));

         } else {
                 pr_err("%s, Loading VM SW failed", __func__);

----------------------------------------------------------------

@@ -49,9 +49,6 @@ struct intr_excp_ctx *crash_ctx;
 static void dump_guest_reg(struct vcpu *vcpu)
 {
-        struct run_context *cur_context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
-
         printf("\n\n================================================");
         printf("================================\n\n");
         printf("Guest Registers:\r\n");
@@ -61,39 +58,39 @@ static void dump_guest_reg(struct vcpu *vcpu)
                 vcpu->arch_vcpu.cur_context);
         printf("= RIP=0x%016llx RSP=0x%016llx "
                         "RFLAGS=0x%016llx\r\n",
-                        cur_context->rip,
-                        cur_context->guest_cpu_regs.regs.rsp,
-                        cur_context->rflags);
+                        vcpu_get_rip(vcpu),
+                        vcpu_get_gpreg(vcpu, CPU_REG_RSP),
+                        vcpu_get_rflags(vcpu));
         printf("= CR0=0x%016llx CR2=0x%016llx "
                         " CR3=0x%016llx\r\n",
-                        cur_context->cr0,
-                        cur_context->cr2,
-                        cur_context->cr3);
+                        vcpu_get_cr0(vcpu),
+                        vcpu_get_cr2(vcpu),
+                        exec_vmread(VMX_GUEST_CR3));
         printf("= RAX=0x%016llx RBX=0x%016llx "
                         "RCX=0x%016llx\r\n",
-                        cur_context->guest_cpu_regs.regs.rax,
-                        cur_context->guest_cpu_regs.regs.rbx,
-                        cur_context->guest_cpu_regs.regs.rcx);
+                        vcpu_get_gpreg(vcpu, CPU_REG_RAX),
+                        vcpu_get_gpreg(vcpu, CPU_REG_RBX),
+                        vcpu_get_gpreg(vcpu, CPU_REG_RCX));
         printf("= RDX=0x%016llx RDI=0x%016llx "
                         "RSI=0x%016llx\r\n",
-                        cur_context->guest_cpu_regs.regs.rdx,
-                        cur_context->guest_cpu_regs.regs.rdi,
-                        cur_context->guest_cpu_regs.regs.rsi);
+                        vcpu_get_gpreg(vcpu, CPU_REG_RDX),
+                        vcpu_get_gpreg(vcpu, CPU_REG_RDI),
+                        vcpu_get_gpreg(vcpu, CPU_REG_RSI));
         printf("= RBP=0x%016llx R8=0x%016llx "
                         "R9=0x%016llx\r\n",
-                        cur_context->guest_cpu_regs.regs.rbp,
-                        cur_context->guest_cpu_regs.regs.r8,
-                        cur_context->guest_cpu_regs.regs.r9);
+                        vcpu_get_gpreg(vcpu, CPU_REG_RBP),
+                        vcpu_get_gpreg(vcpu, CPU_REG_R8),
+                        vcpu_get_gpreg(vcpu, CPU_REG_R9));
         printf("= R10=0x%016llx R11=0x%016llx "
                         "R12=0x%016llx\r\n",
-                        cur_context->guest_cpu_regs.regs.r10,
-                        cur_context->guest_cpu_regs.regs.r11,
-                        cur_context->guest_cpu_regs.regs.r12);
+                        vcpu_get_gpreg(vcpu, CPU_REG_R10),
+                        vcpu_get_gpreg(vcpu, CPU_REG_R11),
+                        vcpu_get_gpreg(vcpu, CPU_REG_R12));
         printf("= R13=0x%016llx R14=0x%016llx "
                         "R15=0x%016llx\r\n",
-                        cur_context->guest_cpu_regs.regs.r13,
-                        cur_context->guest_cpu_regs.regs.r14,
-                        cur_context->guest_cpu_regs.regs.r15);
+                        vcpu_get_gpreg(vcpu, CPU_REG_R13),
+                        vcpu_get_gpreg(vcpu, CPU_REG_R14),
+                        vcpu_get_gpreg(vcpu, CPU_REG_R15));
         printf("\r\n");
 }
@@ -101,11 +98,9 @@ static void dump_guest_stack(struct vcpu *vcpu)
 {
         uint32_t i;
         uint64_t tmp[DUMP_STACK_SIZE];
-        struct run_context *cur_context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
         uint32_t err_code = 0;

-        if (copy_from_gva(vcpu, tmp, cur_context->guest_cpu_regs.regs.rsp,
+        if (copy_from_gva(vcpu, tmp, vcpu_get_gpreg(vcpu, CPU_REG_RSP),
                 DUMP_STACK_SIZE, &err_code) < 0) {
                 printf("\r\nUnabled to Copy Guest Stack:\r\n");
                 return;
@@ -113,11 +108,11 @@ static void dump_guest_stack(struct vcpu *vcpu)
         printf("\r\nGuest Stack:\r\n");
         printf("Dump stack for vcpu %hu, from gva 0x%016llx\r\n",
-                vcpu->vcpu_id, cur_context->guest_cpu_regs.regs.rsp);
+                vcpu->vcpu_id, vcpu_get_gpreg(vcpu, CPU_REG_RSP));
         for (i = 0U; i < (DUMP_STACK_SIZE/32U); i++) {
                 printf("guest_rsp(0x%llx): 0x%016llx 0x%016llx "
                                 "0x%016llx 0x%016llx\r\n",
-                                (cur_context->guest_cpu_regs.regs.rsp+(i*32)),
+                                (vcpu_get_gpreg(vcpu, CPU_REG_RSP)+(i*32)),
                                 tmp[i*4], tmp[(i*4)+1],
                                 tmp[(i*4)+2], tmp[(i*4)+3]);
         }
@@ -128,12 +123,10 @@ static void show_guest_call_trace(struct vcpu *vcpu)
 {
         uint64_t bp;
         uint64_t count = 0UL;
-        struct run_context *cur_context =
-                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
         int err;
         uint32_t err_code;

-        bp = cur_context->guest_cpu_regs.regs.rbp;
+        bp = vcpu_get_gpreg(vcpu, CPU_REG_RBP);
         printf("Guest Call Trace: **************************************\r\n");
         printf("Maybe the call trace is not accurate, pls check stack!!\r\n");
         /* if enable compiler option(no-omit-frame-pointer) the stack layout

----------------------------------------------------------------

@@ -588,7 +588,6 @@ int shell_vcpu_dumpreg(int argc, char **argv)
         struct vcpu *vcpu;
         uint64_t i;
         uint64_t tmp[DUMPREG_SP_SIZE];
-        struct run_context *cur_context;
         uint32_t err_code = 0;

         /* User input invalidation */
@@ -618,8 +617,6 @@ int shell_vcpu_dumpreg(int argc, char **argv)
                 return -EINVAL;
         }

-        cur_context = &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
-
         if (vcpu->state != VCPU_PAUSED) {
                 shell_puts("NOTE: VCPU unPAUSEed, regdump "
                                 "may not be accurate\r\n");
@@ -630,48 +627,49 @@ int shell_vcpu_dumpreg(int argc, char **argv)
                         vm->vm_id, vcpu->vcpu_id);
         shell_puts(temp_str);
         snprintf(temp_str, MAX_STR_SIZE, "= RIP=0x%016llx RSP=0x%016llx "
-                        "RFLAGS=0x%016llx\r\n", cur_context->rip,
-                        cur_context->guest_cpu_regs.regs.rsp, cur_context->rflags);
+                        "RFLAGS=0x%016llx\r\n", vcpu_get_rip(vcpu),
+                        vcpu_get_gpreg(vcpu, CPU_REG_RSP),
+                        vcpu_get_rflags(vcpu));
         shell_puts(temp_str);
         snprintf(temp_str, MAX_STR_SIZE, "= CR0=0x%016llx CR2=0x%016llx\r\n",
-                        cur_context->cr0, cur_context->cr2);
+                        vcpu_get_cr0(vcpu), vcpu_get_cr2(vcpu));
         shell_puts(temp_str);
         snprintf(temp_str, MAX_STR_SIZE, "= CR3=0x%016llx CR4=0x%016llx\r\n",
-                        cur_context->cr3, cur_context->cr4);
+                        exec_vmread(VMX_GUEST_CR3), vcpu_get_cr4(vcpu));
         shell_puts(temp_str);
         snprintf(temp_str, MAX_STR_SIZE, "= RAX=0x%016llx RBX=0x%016llx "
                         "RCX=0x%016llx\r\n",
-                        cur_context->guest_cpu_regs.regs.rax,
-                        cur_context->guest_cpu_regs.regs.rbx,
-                        cur_context->guest_cpu_regs.regs.rcx);
+                        vcpu_get_gpreg(vcpu, CPU_REG_RAX),
+                        vcpu_get_gpreg(vcpu, CPU_REG_RBX),
+                        vcpu_get_gpreg(vcpu, CPU_REG_RCX));
         shell_puts(temp_str);
         snprintf(temp_str, MAX_STR_SIZE, "= RDX=0x%016llx RDI=0x%016llx "
                         "RSI=0x%016llx\r\n",
-                        cur_context->guest_cpu_regs.regs.rdx,
-                        cur_context->guest_cpu_regs.regs.rdi,
-                        cur_context->guest_cpu_regs.regs.rsi);
+                        vcpu_get_gpreg(vcpu, CPU_REG_RDX),
+                        vcpu_get_gpreg(vcpu, CPU_REG_RDI),
+                        vcpu_get_gpreg(vcpu, CPU_REG_RSI));
         shell_puts(temp_str);
         snprintf(temp_str, MAX_STR_SIZE, "= RBP=0x%016llx R8=0x%016llx "
                         "R9=0x%016llx\r\n",
-                        cur_context->guest_cpu_regs.regs.rbp,
-                        cur_context->guest_cpu_regs.regs.r8,
-                        cur_context->guest_cpu_regs.regs.r9);
+                        vcpu_get_gpreg(vcpu, CPU_REG_RBP),
+                        vcpu_get_gpreg(vcpu, CPU_REG_R8),
+                        vcpu_get_gpreg(vcpu, CPU_REG_R9));
         shell_puts(temp_str);
         snprintf(temp_str, MAX_STR_SIZE, "= R10=0x%016llx R11=0x%016llx "
                         "R12=0x%016llx\r\n",
-                        cur_context->guest_cpu_regs.regs.r10,
-                        cur_context->guest_cpu_regs.regs.r11,
-                        cur_context->guest_cpu_regs.regs.r12);
+                        vcpu_get_gpreg(vcpu, CPU_REG_R10),
+                        vcpu_get_gpreg(vcpu, CPU_REG_R11),
+                        vcpu_get_gpreg(vcpu, CPU_REG_R12));
         shell_puts(temp_str);
         snprintf(temp_str, MAX_STR_SIZE,
                         "= R13=0x%016llx R14=0x%016llx R15=0x%016llx\r\n",
-                        cur_context->guest_cpu_regs.regs.r13,
-                        cur_context->guest_cpu_regs.regs.r14,
-                        cur_context->guest_cpu_regs.regs.r15);
+                        vcpu_get_gpreg(vcpu, CPU_REG_R13),
+                        vcpu_get_gpreg(vcpu, CPU_REG_R14),
+                        vcpu_get_gpreg(vcpu, CPU_REG_R15));
         shell_puts(temp_str);

         /* dump sp */
-        status = copy_from_gva(vcpu, tmp, cur_context->guest_cpu_regs.regs.rsp,
+        status = copy_from_gva(vcpu, tmp, vcpu_get_gpreg(vcpu, CPU_REG_RSP),
                 DUMPREG_SP_SIZE*sizeof(uint64_t), &err_code);
         if (status < 0) {
                 /* copy_from_gva fail */
@@ -680,7 +678,7 @@ int shell_vcpu_dumpreg(int argc, char **argv)
                 snprintf(temp_str, MAX_STR_SIZE,
                                 "\r\nDump RSP for vm %hu, from "
                                 "gva 0x%016llx\r\n",
-                                vm_id, cur_context->guest_cpu_regs.regs.rsp);
+                                vm_id, vcpu_get_gpreg(vcpu, CPU_REG_RSP));
         shell_puts(temp_str);

         for (i = 0UL; i < 8UL; i++) {

----------------------------------------------------------------

@@ -152,6 +152,65 @@

 #ifndef ASSEMBLER

+/**
+ *
+ * Identifiers for architecturally defined registers.
+ *
+ * These register names is used in condition statement.
+ * Within the following groups,register name need to be
+ * kept in order:
+ * General register names group (CPU_REG_RAX~CPU_REG_R15);
+ * Non general register names group (CPU_REG_CR0~CPU_REG_GDTR);
+ * Segement register names group (CPU_REG_ES~CPU_REG_GS).
+ */
+enum cpu_reg_name {
+        /* General purpose register layout should align with
+         * struct cpu_gp_regs
+         */
+        CPU_REG_RAX,
+        CPU_REG_RCX,
+        CPU_REG_RDX,
+        CPU_REG_RBX,
+        CPU_REG_RSP,
+        CPU_REG_RBP,
+        CPU_REG_RSI,
+        CPU_REG_RDI,
+        CPU_REG_R8,
+        CPU_REG_R9,
+        CPU_REG_R10,
+        CPU_REG_R11,
+        CPU_REG_R12,
+        CPU_REG_R13,
+        CPU_REG_R14,
+        CPU_REG_R15,
+        CPU_REG_CR0,
+        CPU_REG_CR2,
+        CPU_REG_CR3,
+        CPU_REG_CR4,
+        CPU_REG_DR7,
+        CPU_REG_RIP,
+        CPU_REG_RFLAGS,
+        /*CPU_REG_NATURAL_LAST*/
+        CPU_REG_EFER,
+        CPU_REG_PDPTE0,
+        CPU_REG_PDPTE1,
+        CPU_REG_PDPTE2,
+        CPU_REG_PDPTE3,
+        /*CPU_REG_64BIT_LAST,*/
+        CPU_REG_ES,
+        CPU_REG_CS,
+        CPU_REG_SS,
+        CPU_REG_DS,
+        CPU_REG_FS,
+        CPU_REG_GS,
+        CPU_REG_LDTR,
+        CPU_REG_TR,
+        CPU_REG_IDTR,
+        CPU_REG_GDTR
+        /*CPU_REG_LAST*/
+};
+
 /**********************************/
 /* EXTERNAL VARIABLES */
 /**********************************/

----------------------------------------------------------------

@@ -263,6 +263,8 @@ struct vcpu {
 #ifdef CONFIG_MTRR_ENABLED
         struct mtrr_state mtrr;
 #endif
+        uint64_t reg_cached;
+        uint64_t reg_updated;
 };

 #define is_vcpu_bsp(vcpu) ((vcpu)->vcpu_id == BOOT_CPU_ID)
@@ -273,6 +275,25 @@ static inline void vcpu_retain_rip(struct vcpu *vcpu)
 }

 /* External Interfaces */
+uint64_t vcpu_get_gpreg(struct vcpu *vcpu, uint32_t reg);
+void vcpu_set_gpreg(struct vcpu *vcpu, uint32_t reg, uint64_t val);
+uint64_t vcpu_get_rip(struct vcpu *vcpu);
+void vcpu_set_rip(struct vcpu *vcpu, uint64_t val);
+uint64_t vcpu_get_rsp(struct vcpu *vcpu);
+void vcpu_set_rsp(struct vcpu *vcpu, uint64_t val);
+uint64_t vcpu_get_efer(struct vcpu *vcpu);
+void vcpu_set_efer(struct vcpu *vcpu, uint64_t val);
+uint64_t vcpu_get_rflags(struct vcpu *vcpu);
+void vcpu_set_rflags(struct vcpu *vcpu, uint64_t val);
+uint64_t vcpu_get_cr0(struct vcpu *vcpu);
+int vcpu_set_cr0(struct vcpu *vcpu, uint64_t val);
+uint64_t vcpu_get_cr2(struct vcpu *vcpu);
+void vcpu_set_cr2(struct vcpu *vcpu, uint64_t val);
+uint64_t vcpu_get_cr4(struct vcpu *vcpu);
+int vcpu_set_cr4(struct vcpu *vcpu, uint64_t val);
+uint64_t vcpu_get_pat_ext(struct vcpu *vcpu);
+void vcpu_set_pat_ext(struct vcpu *vcpu, uint64_t val);
+
 struct vcpu* get_ever_run_vcpu(uint16_t pcpu_id);
 int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle);
 int start_vcpu(struct vcpu *vcpu);

----------------------------------------------------------------

@@ -447,7 +447,6 @@ uint64_t vmx_rdmsr_pat(struct vcpu *vcpu);
 int vmx_wrmsr_pat(struct vcpu *vcpu, uint64_t value);

 int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0);
-int vmx_write_cr3(struct vcpu *vcpu, uint64_t cr3);
 int vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4);

 static inline enum vm_cpu_mode get_vcpu_mode(struct vcpu *vcpu)