hv: vmexit: fix MISRA-C violations related to multiple exits

This patch fixes the MISRA-C violations in arch/x86/vmexit.c
 * make each function have only one exit point

Tracked-On: #861
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Shiqing Gao 2018-12-20 14:22:33 +08:00 committed by wenlingz
parent 0a713e6fbc
commit 50f5b0f6bb

View File

@ -160,12 +160,10 @@ int32_t vmexit_handler(struct acrn_vcpu *vcpu)
if (get_cpu_id() != vcpu->pcpu_id) { if (get_cpu_id() != vcpu->pcpu_id) {
pr_fatal("vcpu is not running on its pcpu!"); pr_fatal("vcpu is not running on its pcpu!");
return -EINVAL; ret = -EINVAL;
} } else {
/* Obtain interrupt info */ /* Obtain interrupt info */
vcpu->arch.idt_vectoring_info = vcpu->arch.idt_vectoring_info = exec_vmread32(VMX_IDT_VEC_INFO_FIELD);
exec_vmread32(VMX_IDT_VEC_INFO_FIELD);
/* Filter out HW exception & NMI */ /* Filter out HW exception & NMI */
if ((vcpu->arch.idt_vectoring_info & VMX_INT_INFO_VALID) != 0U) { if ((vcpu->arch.idt_vectoring_info & VMX_INT_INFO_VALID) != 0U) {
uint32_t vector_info = vcpu->arch.idt_vectoring_info; uint32_t vector_info = vcpu->arch.idt_vectoring_info;
@ -195,35 +193,29 @@ int32_t vmexit_handler(struct acrn_vcpu *vcpu)
/* Ensure exit reason is within dispatch table */ /* Ensure exit reason is within dispatch table */
if (basic_exit_reason >= ARRAY_SIZE(dispatch_table)) { if (basic_exit_reason >= ARRAY_SIZE(dispatch_table)) {
pr_err("Invalid Exit Reason: 0x%016llx ", pr_err("Invalid Exit Reason: 0x%016llx ", vcpu->arch.exit_reason);
vcpu->arch.exit_reason); ret = -EINVAL;
return -EINVAL; } else {
}
/* Calculate dispatch table entry */ /* Calculate dispatch table entry */
dispatch = (struct vm_exit_dispatch *) dispatch = (struct vm_exit_dispatch *)(dispatch_table + basic_exit_reason);
(dispatch_table + basic_exit_reason);
/* See if an exit qualification is necessary for this exit /* See if an exit qualification is necessary for this exit handler */
* handler
*/
if (dispatch->need_exit_qualification != 0U) { if (dispatch->need_exit_qualification != 0U) {
/* Get exit qualification */ /* Get exit qualification */
vcpu->arch.exit_qualification = vcpu->arch.exit_qualification = exec_vmread(VMX_EXIT_QUALIFICATION);
exec_vmread(VMX_EXIT_QUALIFICATION);
} }
/* exit dispatch handling */ /* exit dispatch handling */
if (basic_exit_reason == VMX_EXIT_REASON_EXTERNAL_INTERRUPT) { if (basic_exit_reason == VMX_EXIT_REASON_EXTERNAL_INTERRUPT) {
/* Handling external_interrupt /* Handling external_interrupt should disable intr */
* should disable intr
*/
CPU_IRQ_DISABLE(); CPU_IRQ_DISABLE();
ret = dispatch->handler(vcpu); ret = dispatch->handler(vcpu);
CPU_IRQ_ENABLE(); CPU_IRQ_ENABLE();
} else { } else {
ret = dispatch->handler(vcpu); ret = dispatch->handler(vcpu);
} }
}
}
return ret; return ret;
} }
@ -268,6 +260,7 @@ int32_t cr_access_vmexit_handler(struct acrn_vcpu *vcpu)
uint64_t reg; uint64_t reg;
uint32_t idx; uint32_t idx;
uint64_t exit_qual; uint64_t exit_qual;
int32_t ret = 0;
exit_qual = vcpu->arch.exit_qualification; exit_qual = vcpu->arch.exit_qualification;
idx = (uint32_t)vm_exit_cr_access_reg_idx(exit_qual); idx = (uint32_t)vm_exit_cr_access_reg_idx(exit_qual);
@ -305,13 +298,14 @@ int32_t cr_access_vmexit_handler(struct acrn_vcpu *vcpu)
break; break;
default: default:
panic("Unhandled CR access"); panic("Unhandled CR access");
return -EINVAL; ret = -EINVAL;
break;
} }
TRACE_2L(TRACE_VMEXIT_CR_ACCESS, vm_exit_cr_access_type(exit_qual), TRACE_2L(TRACE_VMEXIT_CR_ACCESS, vm_exit_cr_access_type(exit_qual),
vm_exit_cr_access_cr_num(exit_qual)); vm_exit_cr_access_cr_num(exit_qual));
return 0; return ret;
} }
/* /*
@ -322,44 +316,43 @@ static int32_t xsetbv_vmexit_handler(struct acrn_vcpu *vcpu)
{ {
int32_t idx; int32_t idx;
uint64_t val64; uint64_t val64;
int32_t ret = 0;
val64 = exec_vmread(VMX_GUEST_CR4); val64 = exec_vmread(VMX_GUEST_CR4);
if ((val64 & CR4_OSXSAVE) == 0UL) { if ((val64 & CR4_OSXSAVE) == 0UL) {
vcpu_inject_gp(vcpu, 0U); vcpu_inject_gp(vcpu, 0U);
return 0; } else {
}
idx = vcpu->arch.cur_context; idx = vcpu->arch.cur_context;
if (idx >= NR_WORLD) { if (idx >= NR_WORLD) {
return -1; ret = -1;
} } else {
/* to access XCR0,'rcx' should be 0 */ /* to access XCR0,'rcx' should be 0 */
if (vcpu_get_gpreg(vcpu, CPU_REG_RCX) != 0UL) { if (vcpu_get_gpreg(vcpu, CPU_REG_RCX) != 0UL) {
vcpu_inject_gp(vcpu, 0U); vcpu_inject_gp(vcpu, 0U);
return 0; } else {
}
val64 = (vcpu_get_gpreg(vcpu, CPU_REG_RAX) & 0xffffffffUL) | val64 = (vcpu_get_gpreg(vcpu, CPU_REG_RAX) & 0xffffffffUL) |
(vcpu_get_gpreg(vcpu, CPU_REG_RDX) << 32U); (vcpu_get_gpreg(vcpu, CPU_REG_RDX) << 32U);
/* bit 0(x87 state) of XCR0 can't be cleared */ /* bit 0(x87 state) of XCR0 can't be cleared */
if ((val64 & 0x01UL) == 0UL) { if ((val64 & 0x01UL) == 0UL) {
vcpu_inject_gp(vcpu, 0U); vcpu_inject_gp(vcpu, 0U);
return 0; } else {
} /*
* XCR0[2:1] (SSE state & AVX state) can't not be
/*XCR0[2:1] (SSE state & AVX state) can't not be
* set to 10b as it is necessary to set both bits * set to 10b as it is necessary to set both bits
* to use AVX instructions. * to use AVX instructions.
**/ */
if (((val64 >> 1U) & 0x3UL) == 0x2UL) { if (((val64 >> 1U) & 0x3UL) == 0x2UL) {
vcpu_inject_gp(vcpu, 0U); vcpu_inject_gp(vcpu, 0U);
return 0; } else {
write_xcr(0, val64);
}
}
}
}
} }
write_xcr(0, val64); return ret;
return 0;
} }
static int32_t wbinvd_vmexit_handler(struct acrn_vcpu *vcpu) static int32_t wbinvd_vmexit_handler(struct acrn_vcpu *vcpu)