hv: vmcs: fix MISRA-C violations related to multiple exits

This patch fixes the MISRA-C violations in arch/x86/vmcs.c:
 * make each function have only one exit point (see the sketch below)

v1 -> v2:
 * update 'is_cr0_write_valid' and 'is_cr4_write_valid' to use
   `if ... else` rather than re-checking the 'ret' value

Tracked-On: #861
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Shiqing Gao, 2018-12-18 15:55:35 +08:00 (committed by wenlingz)
parent 4618a6b17d
commit e283e77424
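
For readers unfamiliar with the MISRA-C single-exit-point rule, here is a minimal standalone sketch of the transformation this patch applies; the names (RSVD_MASK, is_value_valid_*) are made up for illustration and are not code from vmcs.c. Early returns are replaced by a local 'ret' plus nested `if ... else`, so each function returns from exactly one place.

#include <stdbool.h>
#include <stdint.h>

#define RSVD_MASK 0xffffffff00000000UL  /* made-up reserved-bit mask, for illustration only */

/* Before: multiple exit points (two early returns plus the final one). */
static bool is_value_valid_multi_exit(uint64_t val)
{
        if ((val & RSVD_MASK) != 0UL) {
                return false;
        }
        if (val == 0UL) {
                return false;
        }
        return true;
}

/* After: a single exit point, using the same 'ret' + if/else pattern
 * applied to is_cr0_write_valid/is_cr4_write_valid in this patch. */
static bool is_value_valid_single_exit(uint64_t val)
{
        bool ret = true;

        if ((val & RSVD_MASK) != 0UL) {
                ret = false;
        } else {
                if (val == 0UL) {
                        ret = false;
                }
        }

        return ret;
}

int main(void)
{
        /* Both versions agree on every input; only the control flow differs. */
        return (is_value_valid_multi_exit(0x1234UL) ==
                is_value_valid_single_exit(0x1234UL)) ? 0 : 1;
}

The trade-off is deeper nesting, which is why v2 switched 'is_cr0_write_valid' and 'is_cr4_write_valid' to `if ... else` chains instead of re-checking the 'ret' value after every step.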

@@ -17,6 +17,7 @@ static uint64_t cr4_always_off_mask;
 bool is_vmx_disabled(void)
 {
         uint64_t msr_val;
+        bool ret = false;
 
         /* Read Feature ControL MSR */
         msr_val = msr_read(MSR_IA32_FEATURE_CONTROL);
@@ -24,9 +25,10 @@ bool is_vmx_disabled(void)
         /* Check if feature control is locked and vmx cannot be enabled */
         if (((msr_val & MSR_IA32_FEATURE_CONTROL_LOCK) != 0U) &&
                 ((msr_val & MSR_IA32_FEATURE_CONTROL_VMX_NO_SMX) == 0U)) {
-                return true;
+                ret = true;
         }
-        return false;
+
+        return ret;
 }
 
 static void init_cr0_cr4_host_mask(void)
@@ -96,16 +98,18 @@ int32_t vmx_wrmsr_pat(struct acrn_vcpu *vcpu, uint64_t value)
 {
         uint32_t i;
         uint64_t field;
+        int32_t ret = 0;
 
         for (i = 0U; i < 8U; i++) {
                 field = (value >> (i * 8U)) & 0xffUL;
-                if (pat_mem_type_invalid(field) ||
-                                ((PAT_FIELD_RSV_BITS & field) != 0UL)) {
+                if (pat_mem_type_invalid(field) || ((PAT_FIELD_RSV_BITS & field) != 0UL)) {
                         pr_err("invalid guest IA32_PAT: 0x%016llx", value);
-                        return -EINVAL;
+                        ret = -EINVAL;
+                        break;
                 }
         }
 
+        if (ret == 0) {
                 vcpu_set_guest_msr(vcpu, MSR_IA32_PAT, value);
 
                 /*
@@ -115,8 +119,9 @@ int32_t vmx_wrmsr_pat(struct acrn_vcpu *vcpu, uint64_t value)
                 if ((vcpu_get_cr0(vcpu) & CR0_CD) == 0UL) {
                         exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, value);
                 }
+        }
 
-        return 0;
+        return ret;
 }
 
 static void load_pdptrs(const struct acrn_vcpu *vcpu)
@@ -135,11 +140,12 @@ static void load_pdptrs(const struct acrn_vcpu *vcpu)
 static bool is_cr0_write_valid(struct acrn_vcpu *vcpu, uint64_t cr0)
 {
+        bool ret = true;
 
         /* Shouldn't set always off bit */
         if ((cr0 & cr0_always_off_mask) != 0UL) {
-                return false;
-        }
+                ret = false;
+        } else {
                 /* SDM 25.3 "Changes to instruction behavior in VMX non-root"
                  *
                  * We always require "unrestricted guest" control enabled. So
@@ -147,24 +153,26 @@ static bool is_cr0_write_valid(struct acrn_vcpu *vcpu, uint64_t cr0)
                  * CR0.PG = 1, CR4.PAE = 0 and IA32_EFER.LME = 1 is invalid.
                  * CR0.PE = 0 and CR0.PG = 1 is invalid.
                  */
-        if (((cr0 & CR0_PG) != 0UL) && (!is_pae(vcpu)) && ((vcpu_get_efer(vcpu) & MSR_IA32_EFER_LME_BIT) != 0UL)) {
-                return false;
-        }
-
+                if (((cr0 & CR0_PG) != 0UL) && (!is_pae(vcpu)) &&
+                        ((vcpu_get_efer(vcpu) & MSR_IA32_EFER_LME_BIT) != 0UL)) {
+                        ret = false;
+                } else {
                         if (((cr0 & CR0_PE) == 0UL) && ((cr0 & CR0_PG) != 0UL)) {
-                return false;
-        }
-
+                                ret = false;
+                        } else {
                                 /* SDM 6.15 "Exception and Interrupt Refrerence" GP Exception
                                  *
-         * Loading CR0 regsiter with a set NW flag and a clear CD flag
+                                 * Loading CR0 register with a set NW flag and a clear CD flag
                                  * is invalid
                                  */
                                 if (((cr0 & CR0_CD) == 0UL) && ((cr0 & CR0_NW) != 0UL)) {
-                return false;
+                                        ret = false;
+                                }
+                        }
+                }
         }
 
-        return true;
+        return ret;
 }
 
 /*
@@ -199,9 +207,7 @@ void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t cr0)
         if (!is_cr0_write_valid(vcpu, cr0)) {
                 pr_dbg("Invalid cr0 write operation from guest");
                 vcpu_inject_gp(vcpu, 0U);
-                return;
-        }
-
+        } else {
                 /* SDM 2.5
                  * When loading a control register, reserved bit should always set
                  * to the value previously read.
@@ -282,31 +288,34 @@ void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t cr0)
                 pr_dbg("VMM: Try to write %016llx, allow to write 0x%016llx to CR0", cr0_mask, cr0_vmx);
         }
+}
 
 static bool is_cr4_write_valid(struct acrn_vcpu *vcpu, uint64_t cr4)
 {
+        bool ret = true;
 
         /* Check if guest try to set fixed to 0 bits or reserved bits */
         if ((cr4 & cr4_always_off_mask) != 0U) {
-                return false;
-        }
-
+                ret = false;
+        } else {
                 /* Do NOT support nested guest */
                 if ((cr4 & CR4_VMXE) != 0UL) {
-                return false;
-        }
-
+                        ret = false;
+                } else {
                         /* Do NOT support PCID in guest */
                         if ((cr4 & CR4_PCIDE) != 0UL) {
-                return false;
-        }
-
+                                ret = false;
+                        } else {
                                 if (is_long_mode(vcpu)) {
                                         if ((cr4 & CR4_PAE) == 0UL) {
-                        return false;
+                                                ret = false;
+                                        }
+                                }
+                        }
                 }
         }
 
-        return true;
+        return ret;
 }
 
 /*
@@ -352,9 +361,7 @@ void vmx_write_cr4(struct acrn_vcpu *vcpu, uint64_t cr4)
         if (!is_cr4_write_valid(vcpu, cr4)) {
                 pr_dbg("Invalid cr4 write operation from guest");
                 vcpu_inject_gp(vcpu, 0U);
-                return;
-        }
-
+        } else {
                 if (((cr4 ^ old_cr4) & (CR4_PGE | CR4_PSE | CR4_PAE | CR4_SMEP | CR4_SMAP | CR4_PKE)) != 0UL) {
                         if (((cr4 & CR4_PAE) != 0UL) && (is_paging_enabled(vcpu)) && (is_long_mode(vcpu))) {
                                 load_pdptrs(vcpu);
@@ -371,8 +378,8 @@ void vmx_write_cr4(struct acrn_vcpu *vcpu, uint64_t cr4)
                 /* clear read cache, next time read should from VMCS */
                 bitmap_clear_lock(CPU_REG_CR4, &vcpu->reg_cached);
 
-        pr_dbg("VMM: Try to write %016llx, allow to write 0x%016llx to CR4",
-                cr4, cr4_vmx);
+                pr_dbg("VMM: Try to write %016llx, allow to write 0x%016llx to CR4", cr4, cr4_vmx);
+        }
 }
 
 /* rip, rsp, ia32_efer and rflags are written to VMCS in start_vcpu */