Mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2025-07-20 02:11:14 +00:00)
HV: Fix missing brackets for MISRA C Violations
Patch 2 of 7. Added braces to make sure the MISRA C violations of rules 11S and 12S are fixed.

Signed-off-by: Arindam Roy <arindam.roy@intel.com>
This commit is contained in:
parent 88a3205d3c
commit dbfd0e5b67
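Every hunk below applies the same mechanical fix: each if/else branch body becomes a brace-enclosed compound statement, which is what the cited MISRA C brace rules require. A minimal sketch of that pattern, using a hypothetical helper that mirrors the vie_read_bytereg change in the first hunk (the helper name and surrounding code are illustrative only, not part of the ACRN sources); the behavior is unchanged, only the block structure differs, which is why every hunk in this commit is a pure brace edit:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: returns either the low byte or the "high byte
 * register" view (value >> 8) of a 64-bit value, the way the first
 * hunk below selects %ah vs. %al.
 */
static uint8_t pick_byte(uint64_t val, int lhbr)
{
    uint8_t rval;

    /* MISRA-compliant form: every branch is a compound statement,
     * even though each branch holds a single assignment.
     */
    if (lhbr != 0) {
        rval = (uint8_t)(val >> 8);
    } else {
        rval = (uint8_t)val;
    }
    return rval;
}

int main(void)
{
    /* Prints "12 34": 0x12 is the high byte, 0x34 the low byte of 0x1234. */
    printf("%02x %02x\n", pick_byte(0x1234U, 1), pick_byte(0x1234U, 0));
    return 0;
}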
@@ -290,10 +290,11 @@ vie_read_bytereg(struct vcpu *vcpu, struct vie *vie, uint8_t *rval)
      * To obtain the value of a legacy high byte register shift the
      * base register right by 8 bits (%ah = %rax >> 8).
      */
-    if (lhbr != 0)
+    if (lhbr != 0) {
         *rval = (uint8_t)(val >> 8);
-    else
+    } else {
         *rval = (uint8_t)val;
+    }
     return error;
 }
 
@@ -334,8 +335,9 @@ vie_update_register(struct vcpu *vcpu, enum cpu_reg_name reg,
     case 1U:
     case 2U:
         error = vie_read_register(vcpu, reg, &origval);
-        if (error != 0)
+        if (error != 0) {
             return error;
+        }
         val &= size2mask[size];
         val |= origval & ~size2mask[size];
         break;
@@ -379,15 +381,16 @@ getcc(uint8_t opsize, uint64_t x, uint64_t y)
     ASSERT(opsize == 1U || opsize == 2U || opsize == 4U || opsize == 8U,
         "getcc: invalid operand size %hhu", opsize);
 
-    if (opsize == 1U)
+    if (opsize == 1U) {
         return getcc8((uint8_t)x, (uint8_t)y);
-    else if (opsize == 2U)
+    } else if (opsize == 2U) {
         return getcc16((uint16_t)x, (uint16_t)y);
-    else if (opsize == 4U)
+    } else if (opsize == 4U) {
         return getcc32((uint32_t)x, (uint32_t)y);
-    else
+    } else {
         return getcc64(x, y);
+    }
 }
 
 static int
 emulate_mov(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
@@ -411,9 +414,10 @@ emulate_mov(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
          */
         size = 1U; /* override for byte operation */
         error = vie_read_bytereg(vcpu, vie, &byte);
-        if (error == 0)
+        if (error == 0) {
             error = memwrite(vcpu, gpa, byte, size,
                     arg);
+        }
         break;
     case 0x89U:
         /*
@@ -439,8 +443,9 @@ emulate_mov(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
          */
         size = 1U; /* override for byte operation */
         error = memread(vcpu, gpa, &val, size, arg);
-        if (error == 0)
+        if (error == 0) {
             error = vie_write_bytereg(vcpu, vie, (uint8_t)val);
+        }
         break;
     case 0x8BU:
         /*
@@ -539,8 +544,9 @@ emulate_movx(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
 
         /* get the first operand */
         error = memread(vcpu, gpa, &val, 1U, arg);
-        if (error != 0)
+        if (error != 0) {
             break;
+        }
 
         /* get the second operand */
         reg = gpr_map[vie->reg];
@@ -560,8 +566,9 @@ emulate_movx(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
          * REX.W + 0F B7/r movzx r64, r/m16
          */
         error = memread(vcpu, gpa, &val, 2U, arg);
-        if (error != 0)
+        if (error != 0) {
             return error;
+        }
 
         reg = gpr_map[vie->reg];
 
@@ -582,8 +589,9 @@ emulate_movx(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
 
         /* get the first operand */
         error = memread(vcpu, gpa, &val, 1U, arg);
-        if (error != 0)
+        if (error != 0) {
             break;
+        }
 
         /* get the second operand */
         reg = gpr_map[vie->reg];
@@ -626,22 +634,24 @@ get_gla(struct vcpu *vcpu, __unused struct vie *vie,
 
     if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val, opsize,
         addrsize, prot, gla) != 0) {
-        if (seg == CPU_REG_SS)
+        if (seg == CPU_REG_SS) {
             /*vm_inject_ss(vcpu, 0);*/
             pr_err("TODO: inject ss exception");
-        else
+        } else {
             /*vm_inject_gp(vcpu);*/
             pr_err("TODO: inject gp exception");
+        }
         goto guest_fault;
     }
 
     if (vie_canonical_check(paging->cpu_mode, *gla) != 0) {
-        if (seg == CPU_REG_SS)
+        if (seg == CPU_REG_SS) {
             /*vm_inject_ss(vcpu, 0);*/
             pr_err("TODO: inject ss exception");
-        else
+        } else {
             /*vm_inject_gp(vcpu);*/
             pr_err("TODO: inject gp exception");
+        }
         goto guest_fault;
     }
 
@@ -701,14 +711,16 @@ emulate_movs(struct vcpu *vcpu, __unused uint64_t gpa, struct vie *vie,
     seg = (vie->segment_override != 0U) ? (vie->segment_register) : CPU_REG_DS;
     error = get_gla(vcpu, vie, paging, opsize, vie->addrsize,
             PROT_READ, seg, CPU_REG_RSI, &srcaddr, &fault);
-    if ((error != 0) || (fault != 0))
+    if ((error != 0) || (fault != 0)) {
         goto done;
+    }
 
     error = get_gla(vcpu, vie, paging, opsize, vie->addrsize,
             PROT_WRITE, CPU_REG_ES, CPU_REG_RDI, &dstaddr,
             &fault);
-    if ((error != 0) || (fault != 0))
+    if ((error != 0) || (fault != 0)) {
         goto done;
+    }
 
     (void)memcpy_s((char *)dstaddr, 16U, (char *)srcaddr, opsize);
 
@@ -746,9 +758,10 @@ emulate_movs(struct vcpu *vcpu, __unused uint64_t gpa, struct vie *vie,
         /*
          * Repeat the instruction if the count register is not zero.
          */
-        if ((rcx & vie_size2mask(vie->addrsize)) != 0UL)
+        if ((rcx & vie_size2mask(vie->addrsize)) != 0UL) {
             vcpu_retain_rip(vcpu);
+        }
     }
 done:
     ASSERT(error == 0, "%s: unexpected error %d", __func__, error);
     return error;
@@ -776,16 +789,18 @@ emulate_stos(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
          * The count register is %rcx, %ecx or %cx depending on the
          * address size of the instruction.
          */
-        if ((rcx & vie_size2mask(vie->addrsize)) == 0UL)
+        if ((rcx & vie_size2mask(vie->addrsize)) == 0UL) {
             return 0;
+        }
     }
 
     error = vie_read_register(vcpu, CPU_REG_RAX, &val);
     ASSERT(error == 0, "%s: error %d getting rax", __func__, error);
 
     error = memwrite(vcpu, gpa, val, opsize, arg);
-    if (error != 0)
+    if (error != 0) {
         return error;
+    }
 
     error = vie_read_register(vcpu, CPU_REG_RDI, &rdi);
     ASSERT(error == 0, "%s: error %d getting rdi", __func__, error);
@@ -793,10 +808,11 @@ emulate_stos(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
     error = vie_read_register(vcpu, CPU_REG_RFLAGS, &rflags);
     ASSERT(error == 0, "%s: error %d getting rflags", __func__, error);
 
-    if ((rflags & PSL_D) != 0U)
+    if ((rflags & PSL_D) != 0U) {
         rdi -= opsize;
-    else
+    } else {
         rdi += opsize;
+    }
 
     error = vie_update_register(vcpu, CPU_REG_RDI, rdi,
             vie->addrsize);
@@ -811,9 +827,10 @@ emulate_stos(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
         /*
          * Repeat the instruction if the count register is not zero.
          */
-        if ((rcx & vie_size2mask(vie->addrsize)) != 0UL)
+        if ((rcx & vie_size2mask(vie->addrsize)) != 0UL) {
             vcpu_retain_rip(vcpu);
+        }
     }
 
     return 0;
 }
@@ -852,13 +869,15 @@ emulate_test(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
         /* get the first operand */
         reg = gpr_map[vie->reg];
         error = vie_read_register(vcpu, reg, &val1);
-        if (error != 0)
+        if (error != 0) {
             break;
+        }
 
         /* get the second operand */
         error = memread(vcpu, gpa, &val2, size, arg);
-        if (error != 0)
+        if (error != 0) {
             break;
+        }
 
         /* perform the operation and write the result */
         result = val1 & val2;
@@ -866,12 +885,14 @@ emulate_test(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
     default:
         break;
     }
-    if (error != 0)
+    if (error != 0) {
         return error;
+    }
 
     error = vie_read_register(vcpu, CPU_REG_RFLAGS, &rflags);
-    if (error != 0)
+    if (error != 0) {
         return error;
+    }
 
     /*
      * OF and CF are cleared; the SF, ZF and PF flags are set according
@@ -914,13 +935,15 @@ emulate_and(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
         /* get the first operand */
         reg = gpr_map[vie->reg];
         error = vie_read_register(vcpu, reg, &val1);
-        if (error != 0)
+        if (error != 0) {
             break;
+        }
 
         /* get the second operand */
         error = memread(vcpu, gpa, &val2, size, arg);
-        if (error != 0)
+        if (error != 0) {
             break;
+        }
 
         /* perform the operation and write the result */
         result = val1 & val2;
@@ -944,8 +967,9 @@ emulate_and(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
 
         /* get the first operand */
         error = memread(vcpu, gpa, &val1, size, arg);
-        if (error != 0)
+        if (error != 0) {
             break;
+        }
 
         /*
          * perform the operation with the pre-fetched immediate
@@ -957,12 +981,14 @@ emulate_and(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
     default:
         break;
     }
-    if (error != 0)
+    if (error != 0) {
         return error;
+    }
 
     error = vie_read_register(vcpu, CPU_REG_RFLAGS, &rflags);
-    if (error != 0)
+    if (error != 0) {
         return error;
+    }
 
     /*
      * OF and CF are cleared; the SF, ZF and PF flags are set according
@@ -1013,8 +1039,9 @@ emulate_or(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
 
         /* get the first operand */
         error = memread(vcpu, gpa, &val1, size, arg);
-        if (error != 0)
+        if (error != 0) {
             break;
+        }
 
         /*
          * perform the operation with the pre-fetched immediate
@@ -1033,14 +1060,16 @@ emulate_or(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
 
         /* get the first operand */
         error = memread(vcpu, gpa, &val1, size, arg);
-        if (error != 0)
+        if (error != 0) {
             break;
+        }
 
         /* get the second operand */
         reg = gpr_map[vie->reg];
         error = vie_read_register(vcpu, reg, &val2);
-        if (error != 0)
+        if (error != 0) {
             break;
+        }
 
         /* perform the operation and write the result */
         result = val1 | val2;
@@ -1051,12 +1080,14 @@ emulate_or(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
     default:
         break;
     }
-    if (error != 0)
+    if (error != 0) {
         return error;
+    }
 
     error = vie_read_register(vcpu, CPU_REG_RFLAGS, &rflags);
-    if (error != 0)
+    if (error != 0) {
         return error;
+    }
 
     /*
      * OF and CF are cleared; the SF, ZF and PF flags are set according
@@ -1104,13 +1135,15 @@ emulate_cmp(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
         /* Get the register operand */
         reg = gpr_map[vie->reg];
         error = vie_read_register(vcpu, reg, &regop);
-        if (error != 0)
+        if (error != 0) {
             return error;
+        }
 
         /* Get the memory operand */
         error = memread(vcpu, gpa, &memop, size, arg);
-        if (error != 0)
+        if (error != 0) {
             return error;
+        }
 
         if (vie->op.op_byte == 0x3BU) {
             op1 = regop;
@@ -1147,13 +1180,15 @@ emulate_cmp(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
          * the status flags.
          *
          */
-        if (vie->op.op_byte == 0x80U)
+        if (vie->op.op_byte == 0x80U) {
             size = 1U;
+        }
 
         /* get the first operand */
         error = memread(vcpu, gpa, &op1, size, arg);
-        if (error != 0)
+        if (error != 0) {
             return error;
+        }
 
         rflags2 = getcc(size, op1, (uint64_t)vie->immediate);
         break;
@@ -1161,8 +1196,9 @@ emulate_cmp(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
         return -EINVAL;
     }
     error = vie_read_register(vcpu, CPU_REG_RFLAGS, &rflags);
-    if (error != 0)
+    if (error != 0) {
         return error;
+    }
     rflags &= ~RFLAGS_STATUS_BITS;
     rflags |= rflags2 & RFLAGS_STATUS_BITS;
     size = 8U;
@@ -1196,13 +1232,15 @@ emulate_sub(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
         /* get the first operand */
         reg = gpr_map[vie->reg];
         error = vie_read_register(vcpu, reg, &val1);
-        if (error != 0)
+        if (error != 0) {
             break;
+        }
 
         /* get the second operand */
         error = memread(vcpu, gpa, &val2, size, arg);
-        if (error != 0)
+        if (error != 0) {
             break;
+        }
 
         /* perform the operation and write the result */
         nval = val1 - val2;
@@ -1217,8 +1255,9 @@ emulate_sub(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
         rflags2 = getcc(size, val1, val2);
         error = vie_read_register(vcpu, CPU_REG_RFLAGS,
                 &rflags);
-        if (error != 0)
+        if (error != 0) {
             return error;
+        }
 
         rflags &= ~RFLAGS_STATUS_BITS;
         rflags |= rflags2 & RFLAGS_STATUS_BITS;
@@ -1271,11 +1310,12 @@ emulate_stack_op(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie,
         error = vm_get_seg_desc(vcpu, CPU_REG_SS, &ss_desc);
         ASSERT(error == 0, "%s: error %d getting SS descriptor",
             __func__, error);
-        if (SEG_DESC_DEF32(ss_desc.access))
+        if (SEG_DESC_DEF32(ss_desc.access)) {
             stackaddrsize = 4U;
-        else
+        } else {
             stackaddrsize = 2U;
+        }
     }
 
     error = vie_read_register(vcpu, CPU_REG_CR0, &cr0);
     ASSERT(error == 0, "%s: error %d getting cr0", __func__, error);
@@ -1285,8 +1325,9 @@ emulate_stack_op(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie,
 
     error = vie_read_register(vcpu, CPU_REG_RSP, &rsp);
     ASSERT(error == 0, "%s: error %d getting rsp", __func__, error);
-    if (pushop != 0)
+    if (pushop != 0) {
         rsp -= size;
+    }
 
     if (vie_calculate_gla(paging->cpu_mode, CPU_REG_SS, &ss_desc,
         rsp, size, stackaddrsize, (pushop != 0)? PROT_WRITE : PROT_READ,
@@ -1311,22 +1352,26 @@ emulate_stack_op(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie,
      * take care of data trans if stack_gpa be used for memwrite in
      * the future.
      */
-    if (pushop != 0)
+    if (pushop != 0) {
         err_code |= PAGE_FAULT_WR_FLAG;
+    }
     error = gva2gpa(vcpu, stack_gla, &stack_gpa, &err_code);
     if (error == -EFAULT) {
         vcpu_inject_pf(vcpu, stack_gla, err_code);
         return error;
-    } else if (error < 0)
+    } else if (error < 0) {
         return error;
+    }
     if (pushop != 0) {
         error = memread(vcpu, mmio_gpa, &val, size, arg);
-        if (error == 0)
+        if (error == 0) {
             error = memwrite(vcpu, stack_gpa, val, size, arg);
+        }
     } else {
         error = memread(vcpu, stack_gpa, &val, size, arg);
-        if (error == 0)
+        if (error == 0) {
             error = memwrite(vcpu, mmio_gpa, val, size, arg);
+        }
         rsp += size;
     }
 
@@ -1352,8 +1397,9 @@ emulate_push(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie,
      * PUSH is part of the group 5 extended opcodes and is identified
      * by ModRM:reg = b110.
      */
-    if ((vie->reg & 7U) != 6U)
+    if ((vie->reg & 7U) != 6U) {
         return -EINVAL;
+    }
 
     error = emulate_stack_op(vcpu, mmio_gpa, vie, paging, memread,
             memwrite, arg);
@@ -1373,8 +1419,9 @@ emulate_pop(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie,
      * POP is part of the group 1A extended opcodes and is identified
      * by ModRM:reg = b000.
      */
-    if ((vie->reg & 7U) != 0)
+    if ((vie->reg & 7U) != 0) {
         return -EINVAL;
+    }
 
     error = emulate_stack_op(vcpu, mmio_gpa, vie, paging, memread,
             memwrite, arg);
@@ -1426,15 +1473,17 @@ emulate_bittest(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
      * Currently we only emulate the 'Bit Test' instruction which is
      * identified by a ModR/M:reg encoding of 100b.
      */
-    if ((vie->reg & 7U) != 4U)
+    if ((vie->reg & 7U) != 4U) {
         return -EINVAL;
+    }
 
     error = vie_read_register(vcpu, CPU_REG_RFLAGS, &rflags);
     ASSERT(error == 0, "%s: error %d getting rflags", __func__, error);
 
     error = memread(vcpu, gpa, &val, vie->opsize, memarg);
-    if (error != 0)
+    if (error != 0) {
         return error;
+    }
 
     /*
      * Intel SDM, Vol 2, Table 3-2:
@@ -1444,10 +1493,11 @@ emulate_bittest(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
     bitoff = (uint64_t)vie->immediate & bitmask;
 
     /* Copy the bit into the Carry flag in %rflags */
-    if ((val & (1UL << bitoff)) != 0U)
+    if ((val & (1UL << bitoff)) != 0U) {
         rflags |= PSL_C;
-    else
+    } else {
         rflags &= ~PSL_C;
+    }
     size = 8U;
     error = vie_update_register(vcpu, CPU_REG_RFLAGS, rflags, size);
     ASSERT(error == 0, "%s: error %d updating rflags", __func__, error);
@@ -1462,8 +1512,9 @@ vmm_emulate_instruction(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
 {
     int error;
 
-    if (vie->decoded == 0U)
+    if (vie->decoded == 0U) {
         return -EINVAL;
+    }
     switch (vie->op.op_type) {
     case VIE_OP_TYPE_GROUP1:
         error = emulate_group1(vcpu, gpa, vie, paging,
@@ -1532,8 +1583,9 @@ vie_alignment_check(uint8_t cpl, uint8_t size, uint64_t cr0, uint64_t rf, uint64
         "%s: invalid size %hhu", __func__, size);
     ASSERT(cpl <= 3U, "%s: invalid cpl %d", __func__, cpl);
 
-    if (cpl != 3U || (cr0 & CR0_AM) == 0UL || (rf & PSL_AC) == 0UL)
+    if (cpl != 3U || (cr0 & CR0_AM) == 0UL || (rf & PSL_AC) == 0UL) {
         return 0;
+    }
 
     return ((gla & (size - 1U)) != 0UL) ? 1 : 0;
 }
@@ -1543,19 +1595,21 @@ vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
 {
     uint64_t mask;
 
-    if (cpu_mode != CPU_MODE_64BIT)
+    if (cpu_mode != CPU_MODE_64BIT) {
         return 0;
+    }
 
     /*
      * The value of the bit 47 in the 'gla' should be replicated in the
     * most significant 16 bits.
      */
     mask = ~((1UL << 48) - 1);
-    if ((gla & (1UL << 47)) != 0U)
+    if ((gla & (1UL << 47)) != 0U) {
         return ((gla & mask) != mask) ? 1 : 0;
-    else
+    } else {
         return ((gla & mask) != 0U) ? 1 : 0;
+    }
 }
 
 uint64_t
 vie_size2mask(uint8_t size)
@@ -1597,8 +1651,9 @@ vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
      * then the descriptor is unusable and attempting to use
      * it results in a #GP(0).
      */
-    if (SEG_DESC_UNUSABLE(desc->access))
+    if (SEG_DESC_UNUSABLE(desc->access)) {
         return -1;
+    }
 
     /*
      * The processor generates a #NP exception when a segment
@@ -1619,21 +1674,24 @@ vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
 
     if ((prot & PROT_READ) != 0U) {
         /* #GP on a read access to a exec-only code segment */
-        if ((type & 0xAU) == 0x8U)
+        if ((type & 0xAU) == 0x8U) {
             return -1;
+        }
     }
 
     if ((prot & PROT_WRITE) != 0U) {
         /*
          * #GP on a write access to a code segment or a
         * read-only data segment.
          */
-        if ((type & 0x8U) != 0U) /* code segment */
+        if ((type & 0x8U) != 0U) { /* code segment */
            return -1;
+        }
 
-        if ((type & 0xAU) == 0U) /* read-only data seg */
+        if ((type & 0xAU) == 0U) { /* read-only data seg */
            return -1;
+        }
     }
 
     /*
      * 'desc->limit' is fully expanded taking granularity into
@@ -1652,8 +1710,9 @@ vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
 
     while (length > 0U) {
         offset &= vie_size2mask(addrsize);
-        if (offset < low_limit || offset > high_limit)
+        if (offset < low_limit || offset > high_limit) {
             return -1;
+        }
         offset++;
         length--;
     }
@@ -1706,8 +1765,9 @@ vie_init(struct vie *vie, struct vcpu *vcpu)
     if (ret == -EFAULT) {
         vcpu_inject_pf(vcpu, guest_rip_gva, err_code);
         return ret;
-    } else if (ret < 0)
+    } else if (ret < 0) {
         return ret;
+    }
 
     vie->num_valid = (uint8_t)inst_len;
 
@@ -1721,9 +1781,10 @@ vie_peek(struct vie *vie, uint8_t *x)
     if (vie->num_processed < vie->num_valid) {
         *x = vie->inst[vie->num_processed];
         return 0;
-    } else
+    } else {
         return -1;
+    }
 }
 
 static void
 vie_advance(struct vie *vie)
@@ -1767,21 +1828,23 @@ decode_prefixes(struct vie *vie, enum vm_cpu_mode cpu_mode, bool cs_d)
     uint8_t x;
 
     while (1) {
-        if (vie_peek(vie, &x) != 0)
+        if (vie_peek(vie, &x) != 0) {
             return -1;
+        }
 
-        if (x == 0x66U)
+        if (x == 0x66U) {
             vie->opsize_override = 1U;
-        else if (x == 0x67U)
+        } else if (x == 0x67U) {
             vie->addrsize_override = 1U;
-        else if (x == 0xF3U)
+        } else if (x == 0xF3U) {
             vie->repz_present = 1U;
-        else if (x == 0xF2U)
+        } else if (x == 0xF2U) {
             vie->repnz_present = 1U;
-        else if (segment_override(x, &vie->segment_register))
+        } else if (segment_override(x, &vie->segment_register)) {
             vie->segment_override = 1U;
-        else
+        } else {
             break;
+        }
 
         vie_advance(vie);
     }
@@ -1812,12 +1875,13 @@ decode_prefixes(struct vie *vie, enum vm_cpu_mode cpu_mode, bool cs_d)
          * is 32-bits.
          */
         vie->addrsize = (vie->addrsize_override != 0U)? 4U : 8U;
-        if (vie->rex_w != 0U)
+        if (vie->rex_w != 0U) {
             vie->opsize = 8U;
-        else if (vie->opsize_override != 0U)
+        } else if (vie->opsize_override != 0U) {
             vie->opsize = 2U;
-        else
+        } else {
             vie->opsize = 4U;
+        }
     } else if (cs_d) {
         /* Default address and operand sizes are 32-bits */
         vie->addrsize = vie->addrsize_override != 0U ? 2U : 4U;
@@ -1835,13 +1899,15 @@ decode_two_byte_opcode(struct vie *vie)
 {
     uint8_t x;
 
-    if (vie_peek(vie, &x) != 0)
+    if (vie_peek(vie, &x) != 0) {
         return -1;
+    }
 
     vie->op = two_byte_opcodes[x];
 
-    if (vie->op.op_type == VIE_OP_TYPE_NONE)
+    if (vie->op.op_type == VIE_OP_TYPE_NONE) {
         return -1;
+    }
 
     vie_advance(vie);
     return 0;
@@ -1852,18 +1918,21 @@ decode_opcode(struct vie *vie)
 {
     uint8_t x;
 
-    if (vie_peek(vie, &x) != 0)
+    if (vie_peek(vie, &x) != 0) {
         return -1;
+    }
 
     vie->op = one_byte_opcodes[x];
 
-    if (vie->op.op_type == VIE_OP_TYPE_NONE)
+    if (vie->op.op_type == VIE_OP_TYPE_NONE) {
         return -1;
+    }
 
     vie_advance(vie);
 
-    if (vie->op.op_type == VIE_OP_TYPE_TWO_BYTE)
+    if (vie->op.op_type == VIE_OP_TYPE_TWO_BYTE) {
         return decode_two_byte_opcode(vie);
+    }
 
     return 0;
 }
@@ -1873,14 +1942,17 @@ decode_modrm(struct vie *vie, enum vm_cpu_mode cpu_mode)
 {
     uint8_t x;
 
-    if ((vie->op.op_flags & VIE_OP_F_NO_MODRM) != 0U)
+    if ((vie->op.op_flags & VIE_OP_F_NO_MODRM) != 0U) {
         return 0;
+    }
 
-    if (cpu_mode == CPU_MODE_REAL)
+    if (cpu_mode == CPU_MODE_REAL) {
         return -1;
+    }
 
-    if (vie_peek(vie, &x) != 0)
+    if (vie_peek(vie, &x) != 0) {
         return -1;
+    }
 
     vie->mod = (x >> 6) & 0x3U;
     vie->rm = (x >> 0) & 0x7U;
@@ -1891,8 +1963,9 @@ decode_modrm(struct vie *vie, enum vm_cpu_mode cpu_mode)
      * fault. There has to be a memory access involved to cause the
      * EPT fault.
      */
-    if (vie->mod == VIE_MOD_DIRECT)
+    if (vie->mod == VIE_MOD_DIRECT) {
         return -1;
+    }
 
     if ((vie->mod == VIE_MOD_INDIRECT && vie->rm == VIE_RM_DISP32) ||
         (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)) {
@@ -1915,8 +1988,9 @@ decode_modrm(struct vie *vie, enum vm_cpu_mode cpu_mode)
     vie->reg |= (vie->rex_r << 3);
 
     /* SIB */
-    if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)
+    if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB) {
         goto done;
+    }
 
     vie->base_register = gpr_map[vie->rm];
 
@@ -1937,11 +2011,13 @@ decode_modrm(struct vie *vie, enum vm_cpu_mode cpu_mode)
              * whereas in compatibility mode it just implies disp32.
              */
 
-            if (cpu_mode == CPU_MODE_64BIT)
+            if (cpu_mode == CPU_MODE_64BIT) {
                 vie->base_register = CPU_REG_RIP;
-            else
+            }
+            else {
                 vie->base_register = CPU_REG_LAST;
+            }
         }
         break;
     }
 
@@ -1957,11 +2033,13 @@ decode_sib(struct vie *vie)
     uint8_t x;
 
     /* Proceed only if SIB byte is present */
-    if (vie->mod == VIE_MOD_DIRECT || vie->rm != VIE_RM_SIB)
+    if (vie->mod == VIE_MOD_DIRECT || vie->rm != VIE_RM_SIB) {
         return 0;
+    }
 
-    if (vie_peek(vie, &x) != 0)
+    if (vie_peek(vie, &x) != 0) {
         return -1;
+    }
 
     /* De-construct the SIB byte */
     vie->ss = (x >> 6) & 0x3U;
@@ -2008,8 +2086,9 @@ decode_sib(struct vie *vie)
     }
 
     /* 'scale' makes sense only in the context of an index register */
-    if (vie->index_register < CPU_REG_LAST)
+    if (vie->index_register < CPU_REG_LAST) {
         vie->scale = 1U << vie->ss;
+    }
 
     vie_advance(vie);
 
@@ -2029,24 +2108,28 @@ decode_displacement(struct vie *vie)
     } u;
 
     n = vie->disp_bytes;
-    if (n == 0)
+    if (n == 0) {
        return 0;
+    }
 
-    if (n != 1 && n != 4)
+    if (n != 1 && n != 4) {
         panic("decode_displacement: invalid disp_bytes %d", n);
+    }
 
     for (i = 0; i < n; i++) {
-        if (vie_peek(vie, &x) != 0)
+        if (vie_peek(vie, &x) != 0) {
            return -1;
+        }
 
         u.buf[i] = x;
         vie_advance(vie);
     }
 
-    if (n == 1)
+    if (n == 1) {
         vie->displacement = u.signed8; /* sign-extended */
-    else
+    } else {
         vie->displacement = u.signed32; /* sign-extended */
+    }
 
     return 0;
 }
@@ -2072,36 +2155,41 @@ decode_immediate(struct vie *vie)
          * processor sign-extends all immediates to 64-bits prior
         * to their use.
          */
-        if (vie->opsize == 4U || vie->opsize == 8U)
+        if (vie->opsize == 4U || vie->opsize == 8U) {
            vie->imm_bytes = 4U;
-        else
+        }
+        else {
            vie->imm_bytes = 2U;
+        }
     } else if ((vie->op.op_flags & VIE_OP_F_IMM8) != 0U) {
         vie->imm_bytes = 1U;
     }
 
     n = vie->imm_bytes;
-    if (n == 0)
+    if (n == 0) {
         return 0;
+    }
 
     ASSERT(n == 1 || n == 2 || n == 4,
         "%s: invalid number of immediate bytes: %d", __func__, n);
 
     for (i = 0; i < n; i++) {
-        if (vie_peek(vie, &x) != 0)
+        if (vie_peek(vie, &x) != 0) {
            return -1;
+        }
 
         u.buf[i] = x;
         vie_advance(vie);
     }
 
     /* sign-extend the immediate value before use */
-    if (n == 1)
+    if (n == 1) {
         vie->immediate = u.signed8;
-    else if (n == 2)
+    } else if (n == 2) {
         vie->immediate = u.signed16;
-    else
+    } else {
         vie->immediate = u.signed32;
+    }
 
     return 0;
 }
@@ -2115,8 +2203,9 @@ decode_moffset(struct vie *vie)
         uint64_t u64;
     } u;
 
-    if ((vie->op.op_flags & VIE_OP_F_MOFFSET) == 0U)
+    if ((vie->op.op_flags & VIE_OP_F_MOFFSET) == 0U) {
         return 0;
+    }
 
     /*
      * Section 2.2.1.4, "Direct Memory-Offset MOVs", Intel SDM:
@@ -2127,8 +2216,9 @@ decode_moffset(struct vie *vie)
 
     u.u64 = 0UL;
     for (i = 0U; i < n; i++) {
-        if (vie_peek(vie, &x) != 0)
+        if (vie_peek(vie, &x) != 0) {
            return -1;
+        }
 
         u.buf[i] = x;
         vie_advance(vie);
@@ -2140,26 +2230,33 @@
 int
 __decode_instruction(enum vm_cpu_mode cpu_mode, bool cs_d, struct vie *vie)
 {
-    if (decode_prefixes(vie, cpu_mode, cs_d) != 0)
+    if (decode_prefixes(vie, cpu_mode, cs_d) != 0) {
        return -1;
+    }
 
-    if (decode_opcode(vie) != 0)
+    if (decode_opcode(vie) != 0) {
       return -1;
+    }
 
-    if (decode_modrm(vie, cpu_mode) != 0)
+    if (decode_modrm(vie, cpu_mode) != 0) {
       return -1;
+    }
 
-    if (decode_sib(vie) != 0)
+    if (decode_sib(vie) != 0) {
       return -1;
+    }
 
-    if (decode_displacement(vie) != 0)
+    if (decode_displacement(vie) != 0) {
       return -1;
+    }
 
-    if (decode_immediate(vie) != 0)
+    if (decode_immediate(vie) != 0) {
       return -1;
+    }
 
-    if (decode_moffset(vie) != 0)
+    if (decode_moffset(vie) != 0) {
       return -1;
+    }
 
    vie->decoded = 1U; /* success */
 