hv: support gva2gpa in different paging modes
Translate gva2gpa in different paging modes.

Change the definition of gva2gpa:
- return value for error status
- add a parameter for the error code on a paging fault

Change the definition of vm_gva2gpa:
- return value for error status
- add a parameter for the error code on a paging fault

Signed-off-by: Binbin Wu <binbin.wu@intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Xu, Anthony <anthony.xu@intel.com>
This commit is contained in:
parent
dd14d8e1b0
commit
9e7179c950
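The new contract, sketched before the diff: the caller seeds err_code with the access-type flags (PAGE_FAULT_WR_FLAG for a write, PAGE_FAULT_ID_FLAG for an instruction fetch), checks the return value, and on -EFAULT finds the accumulated #PF error code back in err_code. The sketch below is illustrative only and is not part of the commit; it assumes the hypervisor-internal types and macros already used in this patch (struct vcpu, GPA2HVA, the PAGE_FAULT_* flags), and the helper name fetch_guest_byte is hypothetical.

/* Hypothetical caller of the reworked gva2gpa(); not part of this commit. */
static int fetch_guest_byte(struct vcpu *vcpu, uint64_t gva, uint8_t *val)
{
	uint64_t gpa = 0;
	/* Seed the error code with the access type before the walk. */
	uint32_t err_code = PAGE_FAULT_ID_FLAG;	/* instruction fetch */
	int ret = gva2gpa(vcpu, gva, &gpa, &err_code);

	if (ret == -EINVAL)
		return ret;	/* bad argument, nothing translated */
	if (ret == -EFAULT) {
		/* err_code now carries the #PF error code; the caller
		 * decides whether to inject a page fault into the guest. */
		return ret;
	}

	*val = *(uint8_t *)GPA2HVA(vcpu->vm, gpa);
	return 0;
}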
@@ -15,6 +15,19 @@ uint32_t e820_entries;
 struct e820_entry e820[E820_MAX_ENTRIES];
 struct e820_mem_params e820_mem;
 
+struct page_walk_info {
+	uint64_t top_entry;	/* Top level paging structure entry */
+	int level;
+	int width;
+	bool is_user_mode;
+	bool is_write_access;
+	bool is_inst_fetch;
+	bool pse;		/* CR4.PSE for 32-bit paging,
+				 * true for PAE/4-level paging */
+	bool wp;		/* CR0.WP */
+	bool nxe;		/* MSR_IA32_EFER_NXE_BIT */
+};
+
 inline bool
 is_vm0(struct vm *vm)
 {
@@ -173,34 +186,179 @@ enum vm_paging_mode get_vcpu_paging_mode(struct vcpu *vcpu)
 	return PAGING_MODE_4_LEVEL;
 }
 
-uint64_t gva2gpa(struct vm *vm, uint64_t cr3, uint64_t gva)
+/* TODO: Add code to check for Reserved bits, SMAP and PKE when doing the
+ * translation during the page walk */
+static int _gva2gpa_common(struct vcpu *vcpu, struct page_walk_info *pw_info,
+	uint64_t gva, uint64_t *gpa, uint32_t *err_code)
 {
-	int level, index, shift;
-	uint64_t *base, addr, entry, page_size;
-	uint64_t gpa = 0;
+	int i, index, shift;
+	uint8_t *base;
+	uint64_t entry;
+	uint64_t addr, page_size;
+	int ret = 0;
+	int fault = 0;
+
+	if (pw_info->level < 1)
+		return -EINVAL;
 
-	addr = cr3;
-
-	for (level = 3; level >= 0; level--) {
+	addr = pw_info->top_entry;
+	for (i = pw_info->level - 1; i >= 0; i--) {
 		addr = addr & IA32E_REF_MASK;
-		base = GPA2HVA(vm, addr);
-		ASSERT(base != NULL, "invalid ptp base.");
-		shift = level * 9 + 12;
-		index = (gva >> shift) & 0x1FF;
+		base = GPA2HVA(vcpu->vm, addr);
+		if (base == NULL) {
+			ret = -EFAULT;
+			goto out;
+		}
+
+		shift = i * pw_info->width + 12;
+		index = (gva >> shift) & ((1UL << pw_info->width) - 1);
 		page_size = 1UL << shift;
 
-		entry = base[index];
-		if (level > 0 && (entry & MMU_32BIT_PDE_PS) != 0)
+		if (pw_info->width == 10)
+			/* 32-bit entry */
+			entry = *((uint32_t *)(base + 4 * index));
+		else
+			entry = *((uint64_t *)(base + 8 * index));
+
+		/* check if the entry is present */
+		if (!(entry & MMU_32BIT_PDE_P)) {
+			ret = -EFAULT;
+			goto out;
+		}
+		/* check for R/W */
+		if (pw_info->is_write_access && !(entry & MMU_32BIT_PDE_RW)) {
+			/* Case1: Supermode and wp is 1
+			 * Case2: Usermode */
+			if (!(!pw_info->is_user_mode && !pw_info->wp))
+				fault = 1;
+		}
+		/* check for nx; since the XD bit is reserved(0) for 32-bit
+		 * paging, use the same logic as PAE/4-level paging */
+		if (pw_info->is_inst_fetch && pw_info->nxe &&
+			(entry & MMU_MEM_ATTR_BIT_EXECUTE_DISABLE))
+			fault = 1;
+
+		/* check for U/S */
+		if (!(entry & MMU_32BIT_PDE_US) && pw_info->is_user_mode)
+			fault = 1;
+
+		if (pw_info->pse && (i > 0 && (entry & MMU_32BIT_PDE_PS)))
 			break;
 		addr = entry;
 	}
 
-	entry >>= shift; entry <<= (shift + 12); entry >>= 12;
-	gpa = entry | (gva & (page_size - 1));
-
-	return gpa;
+	entry >>= shift;
+	/* shift left 12 bits more and back to clear the XD/Prot Key/Ignored
+	 * bits */
+	entry <<= (shift + 12);
+	entry >>= 12;
+	*gpa = entry | (gva & (page_size - 1));
+
+out:
+	if (fault) {
+		ret = -EFAULT;
+		*err_code |= PAGE_FAULT_P_FLAG;
+	}
+	return ret;
 }
+
+static int _gva2gpa_pae(struct vcpu *vcpu, struct page_walk_info *pw_info,
+	uint64_t gva, uint64_t *gpa, uint32_t *err_code)
+{
+	int index;
+	uint64_t *base;
+	uint64_t entry;
+	uint64_t addr;
+	int ret;
+
+	addr = pw_info->top_entry & 0xFFFFFFF0UL;
+	base = GPA2HVA(vcpu->vm, addr);
+	if (base == NULL) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	index = (gva >> 30) & 0x3;
+	entry = base[index];
+
+	if (!(entry & MMU_32BIT_PDE_P)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	pw_info->level = 2;
+	pw_info->top_entry = entry;
+	ret = _gva2gpa_common(vcpu, pw_info, gva, gpa, err_code);
+
+out:
+	return ret;
+}
+
+/* Refer to SDM Vol.3A 6-39 section 6.15 for the format of the paging fault
+ * error code.
+ *
+ * The caller should set the content of err_code properly according to the
+ * address usage when calling this function:
+ * - If it is an address for write, set PAGE_FAULT_WR_FLAG in err_code.
+ * - If it is an address for instruction fetch, set PAGE_FAULT_ID_FLAG in
+ *   err_code.
+ * The caller should check the return value to confirm whether the function
+ * succeeded or not.
+ * If a protection violation is detected during the page walk, this function
+ * still returns the translated gpa; it is up to the caller to decide whether
+ * to inject a #PF or not.
+ * - Return 0 for success.
+ * - Return -EINVAL for an invalid parameter.
+ * - Return -EFAULT for a paging fault; refer to err_code for the paging
+ *   fault error code.
+ */
+int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa,
+	uint32_t *err_code)
+{
+	struct run_context *cur_context =
+		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
+	enum vm_paging_mode pm = get_vcpu_paging_mode(vcpu);
+	struct page_walk_info pw_info;
+	int ret = 0;
+
+	if (!gpa || !err_code)
+		return -EINVAL;
+	*gpa = 0;
+
+	pw_info.top_entry = cur_context->cr3;
+	pw_info.level = pm;
+	pw_info.is_write_access = !!(*err_code & PAGE_FAULT_WR_FLAG);
+	pw_info.is_inst_fetch = !!(*err_code & PAGE_FAULT_ID_FLAG);
+	pw_info.is_user_mode = ((exec_vmread(VMX_GUEST_CS_SEL) & 0x3) == 3);
+	pw_info.pse = true;
+	pw_info.nxe = cur_context->ia32_efer & MSR_IA32_EFER_NXE_BIT;
+	pw_info.wp = !!(cur_context->cr0 & CR0_WP);
+
+	*err_code &= ~PAGE_FAULT_P_FLAG;
+
+	if (pm == PAGING_MODE_4_LEVEL) {
+		pw_info.width = 9;
+		ret = _gva2gpa_common(vcpu, &pw_info, gva, gpa, err_code);
+	} else if (pm == PAGING_MODE_3_LEVEL) {
+		pw_info.width = 9;
+		ret = _gva2gpa_pae(vcpu, &pw_info, gva, gpa, err_code);
+	} else if (pm == PAGING_MODE_2_LEVEL) {
+		pw_info.width = 10;
+		pw_info.pse = !!(cur_context->cr4 & CR4_PSE);
+		pw_info.nxe = false;
+		ret = _gva2gpa_common(vcpu, &pw_info, gva, gpa, err_code);
+	} else
+		*gpa = gva;
+
+	if (ret == -EFAULT) {
+		if (pw_info.is_user_mode)
+			*err_code |= PAGE_FAULT_US_FLAG;
+	}
+
+	return ret;
+}
+
 
 void init_e820(void)
 {
 	unsigned int i;
@@ -674,6 +674,7 @@ emulate_movs(struct vcpu *vcpu, __unused uint64_t gpa, struct vie *vie,
 	uint64_t dstaddr, srcaddr, dstgpa, srcgpa;
 	uint64_t rcx, rdi, rsi, rflags;
 	int error, fault, opsize, seg, repeat;
+	uint32_t err_code;
 
 	opsize = (vie->op.op_byte == 0xA4) ? 1 : vie->opsize;
 	error = 0;
@@ -713,8 +714,14 @@ emulate_movs(struct vcpu *vcpu, __unused uint64_t gpa, struct vie *vie,
 	if (error || fault)
 		goto done;
 
-	vm_gva2gpa(vcpu, srcaddr, &srcgpa);
-	vm_gva2gpa(vcpu, dstaddr, &dstgpa);
+	err_code = 0;
+	error = vm_gva2gpa(vcpu, srcaddr, &srcgpa, &err_code);
+	if (error)
+		goto done;
+	err_code = PAGE_FAULT_WR_FLAG;
+	error = vm_gva2gpa(vcpu, dstaddr, &dstgpa, &err_code);
+	if (error)
+		goto done;
 	memcpy_s((char *)dstaddr, 16, (char *)srcaddr, opsize);
 
 	error = vie_read_register(vcpu, VM_REG_GUEST_RSI, &rsi);
@@ -1236,6 +1243,7 @@ emulate_stack_op(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie,
 	struct seg_desc ss_desc;
 	uint64_t cr0, rflags, rsp, stack_gla, stack_gpa, val;
 	int error, size, stackaddrsize, pushop;
+	uint32_t err_code = 0;
 
 	memset(&ss_desc, 0, sizeof(ss_desc));
 
@@ -1302,7 +1310,13 @@ emulate_stack_op(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie,
 		return 0;
 	}
 
-	vm_gva2gpa(vcpu, stack_gla, &stack_gpa);
+	if (pushop)
+		err_code |= PAGE_FAULT_WR_FLAG;
+	error = vm_gva2gpa(vcpu, stack_gla, &stack_gpa, &err_code);
+	if (error) {
+		pr_err("%s: failed to translate gva2gpa", __func__);
+		return error;
+	}
 	if (pushop) {
 		error = memread(vcpu, mmio_gpa, &val, size, arg);
 		if (error == 0)
@@ -304,15 +304,15 @@ static int mmio_write(struct vcpu *vcpu, __unused uint64_t gpa, uint64_t wval,
 	return 0;
 }
 
-void vm_gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa)
+int vm_gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa,
+	uint32_t *err_code)
 {
 
 	ASSERT(gpa != NULL, "Error in input arguments");
 	ASSERT(vcpu != NULL,
 		"Invalid vcpu id when gva2gpa");
 
-	*gpa = gva2gpa(vcpu->vm,
-		vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].cr3, gva);
+	return gva2gpa(vcpu, gva, gpa, err_code);
 }
 
 uint8_t decode_instruction(struct vcpu *vcpu)
@@ -323,13 +323,19 @@ uint8_t decode_instruction(struct vcpu *vcpu)
 	uint32_t csar;
 	int retval = 0;
 	enum vm_cpu_mode cpu_mode;
+	int error;
+	uint32_t err_code;
 
 	guest_rip_gva =
 		vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].rip;
 
-	guest_rip_gpa = gva2gpa(vcpu->vm,
-		vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].cr3,
-		guest_rip_gva);
+	err_code = PAGE_FAULT_ID_FLAG;
+	error = gva2gpa(vcpu, guest_rip_gva, &guest_rip_gpa, &err_code);
+	if (error) {
+		pr_err("gva2gpa failed for guest_rip_gva 0x%016llx:",
+			guest_rip_gva);
+		return 0;
+	}
 
 	guest_rip_hva = GPA2HVA(vcpu->vm, guest_rip_gpa);
 	emul_cnx = &per_cpu(g_inst_ctxt, vcpu->pcpu_id);
@@ -32,10 +32,16 @@ void acrn_update_ucode(struct vcpu *vcpu, uint64_t v)
 	int data_size, data_page_num;
 	uint8_t *ucode_ptr, *ptr;
 	int chunk_size;
+	int error = 0;
+	uint32_t err_code;
 
 	gva = v - sizeof(struct ucode_header);
 
-	vm_gva2gpa(vcpu, gva, &gpa);
+	err_code = 0;
+	error = vm_gva2gpa(vcpu, gva, &gpa, &err_code);
+	if (error)
+		return;
 
 	uhdr = (struct ucode_header *)GPA2HVA(vcpu->vm, gpa);
 
 	data_size = GET_DATA_SIZE(uhdr) + sizeof(struct ucode_header);
@@ -60,7 +66,12 @@ void acrn_update_ucode(struct vcpu *vcpu, uint64_t v)
 		ucode_ptr += chunk_size;
 		gva += chunk_size;
 
-		vm_gva2gpa(vcpu, gva, &gpa);
+		err_code = 0;
+		error = vm_gva2gpa(vcpu, gva, &gpa, &err_code);
+		if (error) {
+			free(ucode_ptr);
+			return;
+		}
 		hva = (uint64_t)GPA2HVA(vcpu->vm, gpa);
 	}
 
@@ -104,8 +104,16 @@ static void dump_guest_stack(struct vcpu *vcpu)
 	uint64_t page2_size;
 	struct run_context *cur_context =
 		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
+	uint32_t err_code;
+	int err;
 
-	gpa = gva2gpa(vcpu->vm, cur_context->cr3, cur_context->rsp);
+	err_code = 0;
+	err = gva2gpa(vcpu, cur_context->rsp, &gpa, &err_code);
+	if (err) {
+		printf("gva2gpa failed for guest rsp 0x%016llx\r\n",
+			cur_context->rsp);
+		return;
+	}
 	hpa = gpa2hpa(vcpu->vm, gpa);
 	printf("\r\nGuest Stack:\r\n");
 	printf("Dump stack for vcpu %d, from gva 0x%016llx ->"
@@ -131,8 +139,15 @@ static void dump_guest_stack(struct vcpu *vcpu)
 			"0x%016llx\r\n", (hpa+i*32), tmp[i*4],
 			tmp[i*4+1], tmp[i*4+2], tmp[i*4+3]);
 	}
-	gpa = gva2gpa(vcpu->vm, cur_context->cr3,
+	err_code = 0;
+	err = gva2gpa(vcpu, cur_context->rsp + page1_size, &gpa,
+		&err_code);
+	if (err) {
+		printf("gva2gpa failed for guest rsp 0x%016llx\r\n",
 			cur_context->rsp + page1_size);
+		return;
+
+	}
 	hpa = gpa2hpa(vcpu->vm, gpa);
 	printf("Dump stack for vcpu %d, from gva 0x%016llx ->"
 		"gpa 0x%016llx -> hpa 0x%016llx \r\n",
@@ -159,6 +174,8 @@ static void show_guest_call_trace(struct vcpu *vcpu)
 	uint64_t count = 0;
 	struct run_context *cur_context =
 		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
+	int err;
+	uint32_t err_code;
 
 	bp = cur_context->guest_cpu_regs.regs.rbp;
 	printf("Guest Call Trace: **************************************\r\n");
@@ -177,7 +194,11 @@ static void show_guest_call_trace(struct vcpu *vcpu)
 	 * if the address is invalid, it will cause hv page fault
 	 * then halt system */
 	while ((count++ < CALL_TRACE_HIERARCHY_MAX) && (bp != 0)) {
-		gpa = gva2gpa(vcpu->vm, cur_context->cr3, bp);
+		err = gva2gpa(vcpu, bp, &gpa, &err_code);
+		if (err) {
+			printf("gva2gpa failed for guest bp 0x%016llx\r\n", bp);
+			break;
+		}
 		hpa = gpa2hpa(vcpu->vm, gpa);
 		hva = HPA2HVA(hpa);
 		printf("BP_GVA(0x%016llx)->BP_GPA(0x%016llx)"
@@ -648,6 +648,7 @@ int shell_vcpu_dumpreg(struct shell *p_shell,
 	uint64_t gpa, hpa, i;
 	uint64_t *tmp;
 	struct run_context *cur_context;
+	uint32_t err_code;
 
 	/* User input invalidation */
 	if (argc != 3) {
@@ -724,10 +725,8 @@ int shell_vcpu_dumpreg(struct shell *p_shell,
 	shell_puts(p_shell, temp_str);
 
 	/* dump sp */
-	gpa = gva2gpa(vm, cur_context->cr3,
-			cur_context->rsp);
-	if (gpa == 0) {
-		status = -EINVAL;
+	status = gva2gpa(vcpu, cur_context->rsp, &gpa, &err_code);
+	if (status) {
 		shell_puts(p_shell, "Cannot handle user gva yet!\r\n");
 	} else {
 		hpa = gpa2hpa(vm, gpa);
@@ -763,6 +762,7 @@ int shell_vcpu_dumpmem(struct shell *p_shell,
 	char temp_str[MAX_STR_SIZE];
 	struct vm *vm;
 	struct vcpu *vcpu;
+	uint32_t err_code;
 
 	/* User input invalidation */
 	if (argc != 4 && argc != 5) {
@@ -791,12 +791,8 @@
 
 	vcpu = vcpu_from_vid(vm, (long)vcpu_id);
 	if (vcpu) {
-		struct run_context *cur_context =
-			&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
-
-		gpa = gva2gpa(vcpu->vm, cur_context->cr3, gva);
-		if (gpa == 0) {
-			status = -EINVAL;
+		status = gva2gpa(vcpu, gva, &gpa, &err_code);
+		if (status) {
 			shell_puts(p_shell,
 				"Cannot handle user gva yet!\r\n");
 		} else {
@@ -91,8 +91,9 @@ bool is_vm0(struct vm *vm);
 bool vm_lapic_disabled(struct vm *vm);
 uint64_t vcpumask2pcpumask(struct vm *vm, uint64_t vdmask);
 
-uint64_t gva2gpa(struct vm *vm, uint64_t cr3, uint64_t gva);
-void vm_gva2gpa(struct vcpu *vcpu, uint64_t gla, uint64_t *gpa);
+int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa, uint32_t *err_code);
+int vm_gva2gpa(struct vcpu *vcpu, uint64_t gla, uint64_t *gpa,
+	uint32_t *err_code);
 
 struct vcpu *get_primary_vcpu(struct vm *vm);
 struct vcpu *vcpu_from_vid(struct vm *vm, int vcpu_id);