mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-06-19 04:02:05 +00:00
HV: instr_emul: Handle error gracefully
ASSERT is too strict for the HV when an error happens while emulating an instruction. This patch removes all ASSERTs and returns a negative error code when failing to emulate an instruction. Originally, getcc would return -EINVAL when opsize was not one of (1, 2, 4, 8). But theoretically, opsize in the current implementation can only be one of (1, 2, 4, 8). So, we will always get a valid "cc". This patch adds a pre-assumption and makes sure that getcc always returns a valid value. With the current code, a #GP will be injected into the guest if something goes wrong with instruction emulation. Signed-off-by: Kaige Fu <kaige.fu@intel.com> Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
This commit is contained in:
parent
8836abef21
commit
ce79d3aa24
@ -313,8 +313,6 @@ vie_update_register(struct vcpu *vcpu, enum cpu_reg_name reg,
|
||||
}
|
||||
|
||||
error = vm_set_register(vcpu, reg, val);
|
||||
ASSERT(error == 0, "%s: Error (%d) happens when update reg",
|
||||
__func__, error);
|
||||
|
||||
return error;
|
||||
}
|
||||
@ -358,21 +356,21 @@ build_getcc(getcc16, uint16_t, x, y)
|
||||
build_getcc(getcc32, uint32_t, x, y)
|
||||
build_getcc(getcc64, uint64_t, x, y)
|
||||
|
||||
static uint64_t
|
||||
getcc(uint8_t opsize, uint64_t x, uint64_t y)
|
||||
/**
|
||||
* @pre opsize = 1, 2, 4 or 8
|
||||
*/
|
||||
static uint64_t getcc(uint8_t opsize, uint64_t x, uint64_t y)
|
||||
{
|
||||
ASSERT(opsize == 1U || opsize == 2U || opsize == 4U || opsize == 8U,
|
||||
"getcc: invalid operand size %hhu", opsize);
|
||||
|
||||
if (opsize == 1U) {
|
||||
return getcc8((uint8_t)x, (uint8_t)y);
|
||||
} else if (opsize == 2U) {
|
||||
return getcc16((uint16_t)x, (uint16_t)y);
|
||||
} else if (opsize == 4U) {
|
||||
return getcc32((uint32_t)x, (uint32_t)y);
|
||||
} else {
|
||||
return getcc64(x, y);
|
||||
}
|
||||
switch (opsize) {
|
||||
case 1:
|
||||
return getcc8((uint8_t) x, (uint8_t) y);
|
||||
case 2:
|
||||
return getcc16((uint16_t) x, (uint16_t) y);
|
||||
case 4:
|
||||
return getcc32((uint32_t) x, (uint32_t) y);
|
||||
default: /* opsize == 8 */
|
||||
return getcc64(x, y);
|
||||
}
|
||||
}
|
||||
|
||||
static int emulate_mov(struct vcpu *vcpu, struct instr_emul_vie *vie)
|
||||
@ -723,7 +721,6 @@ static int emulate_movs(struct vcpu *vcpu, struct instr_emul_vie *vie,
|
||||
}
|
||||
}
|
||||
done:
|
||||
ASSERT(error == 0, "%s: unexpected error %d", __func__, error);
|
||||
return error;
|
||||
}
|
||||
|
||||
@ -1206,8 +1203,6 @@ static int emulate_stack_op(struct vcpu *vcpu, struct instr_emul_vie *vie,
|
||||
* stack pointer.
|
||||
*/
|
||||
error = vm_get_seg_desc(vcpu, CPU_REG_SS, &ss_desc);
|
||||
ASSERT(error == 0, "%s: error %d getting SS descriptor",
|
||||
__func__, error);
|
||||
if (SEG_DESC_DEF32(ss_desc.access)) {
|
||||
stackaddrsize = 4U;
|
||||
} else {
|
||||
@ -1428,15 +1423,12 @@ int vmm_emulate_instruction(struct instr_emul_ctxt *ctxt)
|
||||
return error;
|
||||
}
|
||||
|
||||
int
|
||||
vie_alignment_check(uint8_t cpl, uint8_t size, uint64_t cr0, uint64_t rflags,
|
||||
uint64_t gla)
|
||||
int vie_alignment_check(uint8_t cpl, uint8_t size, uint64_t cr0,
|
||||
uint64_t rflags, uint64_t gla)
|
||||
{
|
||||
ASSERT(size == 1U || size == 2U || size == 4U || size == 8U,
|
||||
"%s: invalid size %hhu", __func__, size);
|
||||
ASSERT(cpl <= 3U, "%s: invalid cpl %d", __func__, cpl);
|
||||
pr_dbg("Checking alignment with cpl: %hhu, addrsize: %hhu", cpl, size);
|
||||
|
||||
if (cpl != 3U || (cr0 & CR0_AM) == 0UL || (rflags & PSL_AC) == 0UL) {
|
||||
if (cpl < 3U || (cr0 & CR0_AM) == 0UL || (rflags & PSL_AC) == 0UL) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1464,8 +1456,15 @@ vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
|
||||
}
|
||||
}
|
||||
|
||||
int
|
||||
vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
|
||||
/*
|
||||
*@pre seg must be segment register index
|
||||
*@pre length_arg must be 1, 2, 4 or 8
|
||||
*@pre prot must be PROT_READ or PROT_WRITE
|
||||
*
|
||||
*return 0 - on success
|
||||
*return -1 - on failure
|
||||
*/
|
||||
int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
|
||||
struct seg_desc *desc, uint64_t offset_arg, uint8_t length_arg,
|
||||
uint8_t addrsize, uint32_t prot, uint64_t *gla)
|
||||
{
|
||||
@ -1475,23 +1474,20 @@ vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
|
||||
uint8_t glasize;
|
||||
uint32_t type;
|
||||
|
||||
ASSERT((seg >= CPU_REG_SEG_FIRST) && (seg <= CPU_REG_SEG_LAST),
|
||||
"%s: invalid segment %d", __func__, seg);
|
||||
ASSERT(length == 1U || length == 2U || length == 4U || length == 8U,
|
||||
"%s: invalid operand size %hhu", __func__, length);
|
||||
ASSERT((prot & ~(PROT_READ | PROT_WRITE)) == 0U,
|
||||
"%s: invalid prot %#x", __func__, prot);
|
||||
|
||||
firstoff = offset;
|
||||
if (cpu_mode == CPU_MODE_64BIT) {
|
||||
ASSERT(addrsize == 4U || addrsize == 8U,
|
||||
"%s: invalid address size %d for cpu_mode %d",
|
||||
__func__, addrsize, cpu_mode);
|
||||
if (addrsize != 4U && addrsize != 8U) {
|
||||
pr_dbg("%s: invalid addr size %d for cpu mode %d",
|
||||
__func__, addrsize, cpu_mode);
|
||||
return -1;
|
||||
}
|
||||
glasize = 8U;
|
||||
} else {
|
||||
ASSERT(addrsize == 2U || addrsize == 4U,
|
||||
"%s: invalid address size %d for cpu mode %d",
|
||||
__func__, addrsize, cpu_mode);
|
||||
if (addrsize != 2U && addrsize != 4U) {
|
||||
pr_dbg("%s: invalid addr size %d for cpu mode %d",
|
||||
__func__, addrsize, cpu_mode);
|
||||
return -1;
|
||||
}
|
||||
glasize = 4U;
|
||||
/*
|
||||
* If the segment selector is loaded with a NULL selector
|
||||
@ -1508,16 +1504,17 @@ vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
|
||||
* descriptor that is not present. If this was the case then
|
||||
* it would have been checked before the VM-exit.
|
||||
*/
|
||||
ASSERT(SEG_DESC_PRESENT(desc->access),
|
||||
"segment %d not present: %#x", seg, desc->access);
|
||||
if (SEG_DESC_PRESENT(desc->access) != 0) {
|
||||
/* TODO: Inject #NP */
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*
|
||||
* The descriptor type must indicate a code/data segment.
|
||||
*/
|
||||
/* The descriptor type must indicate a code/data segment. */
|
||||
type = SEG_DESC_TYPE(desc->access);
|
||||
ASSERT(type >= 16U && type <= 31U,
|
||||
"segment %d has invalid descriptor type %#x",
|
||||
seg, type);
|
||||
if (type < 16 || type > 31) {
|
||||
/*TODO: Inject #GP */
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ((prot & PROT_READ) != 0U) {
|
||||
/* #GP on a read access to a exec-only code segment */
|
||||
|
@ -296,8 +296,6 @@ static void get_guest_paging_info(struct vcpu *vcpu, struct instr_emul_ctxt *emu
|
||||
{
|
||||
uint8_t cpl;
|
||||
|
||||
ASSERT(emul_ctxt != NULL && vcpu != NULL, "Error in input arguments");
|
||||
|
||||
cpl = (uint8_t)((csar >> 5) & 3U);
|
||||
emul_ctxt->paging.cr3 = exec_vmread(VMX_GUEST_CR3);
|
||||
emul_ctxt->paging.cpl = cpl;
|
||||
@ -313,6 +311,10 @@ int decode_instruction(struct vcpu *vcpu)
|
||||
enum vm_cpu_mode cpu_mode;
|
||||
|
||||
emul_ctxt = &per_cpu(g_inst_ctxt, vcpu->pcpu_id);
|
||||
if (emul_ctxt == NULL) {
|
||||
pr_err("%s: Failed to get emul_ctxt", __func__);
|
||||
return -1;
|
||||
}
|
||||
emul_ctxt->vcpu = vcpu;
|
||||
|
||||
retval = vie_init(&emul_ctxt->vie, vcpu);
|
||||
|
Loading…
Reference in New Issue
Block a user