hv: fix 'No brackets to then/else'

- add missing brackets for 'if/else' statements based on MISRA-C
  requirements (a minimal before/after sketch of the rule follows the
  v1 -> v2 change log below)

v1 -> v2:
 * add brackets around each condition in 'if' statements to improve
   readability
 * modify 'ptdev_init' to make the logic clearer
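
A minimal before/after sketch of the rule being applied. The clamp_add()
helper and its variables are hypothetical and used for illustration only;
they are not taken from the hypervisor sources:

#include <stdint.h>

/* Hypothetical example: non-compliant vs. compliant 'if/else' forms. */
static uint32_t clamp_add(uint32_t val, uint32_t step, uint32_t limit)
{
        /* Non-compliant form, flagged as 'No brackets to then/else':
         *
         *      if (val + step > limit || step == 0U)
         *              val = limit;
         *      else
         *              val += step;
         */

        /* Compliant form: braces around every 'then'/'else' arm and
         * parentheses around each sub-condition, as done in this patch. */
        if (((val + step) > limit) || (step == 0U)) {
                val = limit;
        } else {
                val += step;
        }

        return val;
}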

Tracked-On: #861
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
Authored by Shiqing Gao on 2018-09-30 14:04:18 +08:00
Committed by wenlingz
parent 71927f3c5b
commit 0317cfb2b6
23 changed files with 131 additions and 72 deletions


@@ -172,8 +172,9 @@ static void get_cpu_capabilities(void)
 #ifndef CONFIG_RETPOLINE
         if (cpu_has_cap(X86_FEATURE_IBRS_IBPB)) {
                 ibrs_type = IBRS_RAW;
-                if (cpu_has_cap(X86_FEATURE_STIBP))
+                if (cpu_has_cap(X86_FEATURE_STIBP)) {
                         ibrs_type = IBRS_OPT;
+                }
         }
 #endif
 }
@@ -789,14 +790,16 @@ static void ept_cap_detect(void)
         msr_val = msr_val >> 32U;
 
         /* Check if secondary processor based VM control is available. */
-        if ((msr_val & VMX_PROCBASED_CTLS_SECONDARY) == 0UL)
+        if ((msr_val & VMX_PROCBASED_CTLS_SECONDARY) == 0UL) {
                 return;
+        }
 
         /* Read secondary processor based VM control. */
         msr_val = msr_read(MSR_IA32_VMX_PROCBASED_CTLS2);
 
-        if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_EPT))
+        if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_EPT)) {
                 cpu_caps.ept_features = 1U;
+        }
 }
 
 static void apicv_cap_detect(void)


@@ -55,10 +55,12 @@ void free_ept_mem(uint64_t *pml4_page)
 void destroy_ept(struct vm *vm)
 {
-        if (vm->arch_vm.nworld_eptp != NULL)
+        if (vm->arch_vm.nworld_eptp != NULL) {
                 free_ept_mem((uint64_t *)vm->arch_vm.nworld_eptp);
-        if (vm->arch_vm.m2p != NULL)
+        }
+        if (vm->arch_vm.m2p != NULL) {
                 free_ept_mem((uint64_t *)vm->arch_vm.m2p);
+        }
 }
 
 /* using return value INVALID_HPA as error code */
 uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)


@@ -2213,8 +2213,9 @@ static int instr_check_gva(struct vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt,
                 /* RIP relative addressing starts from the
                  * following instruction
                  */
-                if (vie->base_register == CPU_REG_RIP)
+                if (vie->base_register == CPU_REG_RIP) {
                         base += vie->num_processed;
+                }
         }
@@ -2332,8 +2333,9 @@ int decode_instruction(struct vcpu *vcpu)
          */
         if ((emul_ctxt->vie.op.op_flags & VIE_OP_F_CHECK_GVA_DI) != 0U) {
                 retval = instr_check_di(vcpu, emul_ctxt);
-                if (retval < 0)
+                if (retval < 0) {
                         return retval;
+                }
         } else {
                 instr_check_gva(vcpu, emul_ctxt, cpu_mode);
         }


@@ -1982,9 +1982,10 @@ int vlapic_create(struct vcpu *vcpu)
                 uint64_t *pml4_page =
                         (uint64_t *)vcpu->vm->arch_vm.nworld_eptp;
                 /* only need unmap it from SOS as UOS never mapped it */
-                if (is_vm0(vcpu->vm))
+                if (is_vm0(vcpu->vm)) {
                         ept_mr_del(vcpu->vm, pml4_page,
                                 DEFAULT_APIC_BASE, CPU_PAGE_SIZE);
+                }
 
                 ept_mr_add(vcpu->vm, pml4_page,
                         vlapic_apicv_get_apic_access_addr(),


@@ -308,13 +308,15 @@ int reset_vm(struct vm *vm)
         int i;
         struct vcpu *vcpu = NULL;
 
-        if (vm->state != VM_PAUSED)
+        if (vm->state != VM_PAUSED) {
                 return -1;
+        }
 
         foreach_vcpu(i, vm, vcpu) {
                 reset_vcpu(vcpu);
-                if (is_vcpu_bsp(vcpu))
+                if (is_vcpu_bsp(vcpu)) {
                         vm_sw_loader(vm, vcpu);
+                }
 
                 vcpu->arch_vcpu.cpu_mode = CPU_MODE_REAL;
         }


@@ -134,16 +134,18 @@ static union lapic_base_msr lapic_base_msr;
 static inline uint32_t read_lapic_reg32(uint32_t offset)
 {
-        if (offset < 0x20U || offset > 0x3ffU)
+        if ((offset < 0x20U) || (offset > 0x3ffU)) {
                 return 0;
+        }
 
         return mmio_read32(lapic_info.xapic.vaddr + offset);
 }
 
 void write_lapic_reg32(uint32_t offset, uint32_t value)
 {
-        if (offset < 0x20U || offset > 0x3ffU)
+        if ((offset < 0x20U) || (offset > 0x3ffU)) {
                 return;
+        }
 
         mmio_write32(value, lapic_info.xapic.vaddr + offset);
 }
@@ -398,10 +400,11 @@ send_startup_ipi(enum intr_cpu_startup_shorthand cpu_startup_shorthand,
         write_lapic_reg32(LAPIC_INT_COMMAND_REGISTER_0, icr.value_32.lo_32);
         wait_for_delivery();
 
-        if (boot_cpu_data.family == 6U)
+        if (boot_cpu_data.family == 6U) {
                 udelay(10U); /* 10us is enough for Modern processors */
-        else
+        } else {
                 udelay(200U); /* 200us for old processors */
+        }
 
         /* Send another start IPI as per the Intel Arch specification */
         write_lapic_reg32(LAPIC_INT_COMMAND_REGISTER_1, icr.value_32.hi_32);
@@ -449,8 +452,9 @@ int send_shorthand_ipi(uint8_t vector,
         if ((shorthand < INTR_LAPIC_ICR_SELF)
                 || (shorthand > INTR_LAPIC_ICR_ALL_EX_SELF)
-                || (delivery_mode > INTR_LAPIC_ICR_NMI))
+                || (delivery_mode > INTR_LAPIC_ICR_NMI)) {
                 status = -EINVAL;
+        }
 
         ASSERT(status == 0, "Incorrect arguments");


@@ -95,8 +95,9 @@ void init_mtrr(struct vcpu *vcpu)
         vcpu->mtrr.def_type.bits.fixed_enable = 1U;
         vcpu->mtrr.def_type.bits.type = MTRR_MEM_TYPE_UC;
 
-        if (is_vm0(vcpu->vm))
+        if (is_vm0(vcpu->vm)) {
                 cap.value = msr_read(MSR_IA32_MTRR_CAP);
+        }
 
         for (i = 0U; i < FIXED_RANGE_MTRR_NUM; i++) {
                 if (cap.bits.fix != 0U) {


@@ -22,8 +22,9 @@ static void kick_notification(__unused uint32_t irq, __unused void *data)
                 struct smp_call_info_data *smp_call =
                         &per_cpu(smp_call_info, pcpu_id);
-                if (smp_call->func != NULL)
+                if (smp_call->func != NULL) {
                         smp_call->func(smp_call->data);
+                }
 
                 bitmap_clear_nolock(pcpu_id, &smp_call_mask);
         }
 }


@@ -283,8 +283,10 @@ static void add_pte(uint64_t *pde, uint64_t paddr_start,
                 set_pgentry(pte, paddr | prot);
                 paddr += PTE_SIZE;
                 vaddr += PTE_SIZE;
-                if (vaddr >= vaddr_end)
+
+                if (vaddr >= vaddr_end) {
                         break;  /* done */
+                }
         }
 }


@@ -23,20 +23,22 @@ static void acpi_gas_write(struct acpi_generic_address *gas, uint32_t val)
 {
         uint16_t val16 = (uint16_t)val;
 
-        if (gas->space_id == SPACE_SYSTEM_MEMORY)
+        if (gas->space_id == SPACE_SYSTEM_MEMORY) {
                 mmio_write16(val16, hpa2hva(gas->address));
-        else
+        } else {
                 pio_write16(val16, (uint16_t)gas->address);
+        }
 }
 
 static uint32_t acpi_gas_read(struct acpi_generic_address *gas)
 {
         uint32_t ret = 0U;
 
-        if (gas->space_id == SPACE_SYSTEM_MEMORY)
+        if (gas->space_id == SPACE_SYSTEM_MEMORY) {
                 ret = mmio_read16(hpa2hva(gas->address));
-        else
+        } else {
                 ret = pio_read16((uint16_t)gas->address);
+        }
 
         return ret;
 }
@@ -49,8 +51,9 @@ void do_acpi_s3(struct vm *vm, uint32_t pm1a_cnt_val,
         acpi_gas_write(&(sx_data->pm1a_cnt), pm1a_cnt_val);
 
-        if (vm->pm.sx_state_data->pm1b_cnt.address != 0U)
+        if (vm->pm.sx_state_data->pm1b_cnt.address != 0U) {
                 acpi_gas_write(&(sx_data->pm1b_cnt), pm1b_cnt_val);
+        }
 
         while (1) {
                 /* polling PM1 state register to detect wether
@@ -70,8 +73,9 @@ void do_acpi_s3(struct vm *vm, uint32_t pm1a_cnt_val,
                  * WAK_STS(bit 15) is set if system will transition to working
                  * state.
                  */
-                if ((s1 & (1U << BIT_WAK_STS)) != 0U)
+                if ((s1 & (1U << BIT_WAK_STS)) != 0U) {
                         break;
+                }
         }
 }


@@ -131,8 +131,9 @@ static int vcpu_inject_vlapic_int(struct vcpu *vcpu)
          * - maskable interrupt vectors [16,255] can be delivered
          *   through the local APIC.
          */
-        if (ret == 0)
+        if (ret == 0) {
                 return -1;
+        }
 
         if (!(vector >= 16U && vector <= 255U)) {
                 dev_dbg(ACRN_DBG_INTR, "invalid vector %d from local APIC",
@@ -194,17 +195,17 @@ void dump_lapic(void)
 /* SDM Vol3 -6.15, Table 6-4 - interrupt and exception classes */
 static int get_excep_class(uint32_t vector)
 {
-        if (vector == IDT_DE || vector == IDT_TS || vector == IDT_NP ||
-                vector == IDT_SS || vector == IDT_GP)
+        if ((vector == IDT_DE) || (vector == IDT_TS) || (vector == IDT_NP) ||
+                (vector == IDT_SS) || (vector == IDT_GP)) {
                 return EXCEPTION_CLASS_CONT;
-        else if (vector == IDT_PF || vector == IDT_VE)
+        } else if ((vector == IDT_PF) || (vector == IDT_VE)) {
                 return EXCEPTION_CLASS_PF;
-        else
+        } else {
                 return EXCEPTION_CLASS_BENIGN;
+        }
 }
 
-int vcpu_queue_exception(struct vcpu *vcpu, uint32_t vector,
-        uint32_t err_code)
+int vcpu_queue_exception(struct vcpu *vcpu, uint32_t vector, uint32_t err_code)
 {
         struct vcpu_arch *arch_vcpu = &vcpu->arch_vcpu;
         /* VECTOR_INVALID is also greater than 32 */
@@ -240,10 +241,11 @@ int vcpu_queue_exception(struct vcpu *vcpu, uint32_t vector,
         arch_vcpu->exception_info.exception = vector;
 
-        if ((exception_type[vector] & EXCEPTION_ERROR_CODE_VALID) != 0U)
+        if ((exception_type[vector] & EXCEPTION_ERROR_CODE_VALID) != 0U) {
                 arch_vcpu->exception_info.error = err_code;
-        else
+        } else {
                 arch_vcpu->exception_info.error = 0U;
+        }
 
         return 0;
 }
@@ -391,19 +393,26 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
         uint64_t *pending_req_bits = &arch_vcpu->pending_req;
         struct acrn_vlapic *vlapic = vcpu_vlapic(vcpu);
 
-        if (bitmap_test_and_clear_lock(ACRN_REQUEST_TRP_FAULT, pending_req_bits)) {
+        if (bitmap_test_and_clear_lock(ACRN_REQUEST_TRP_FAULT,
+                pending_req_bits)) {
                 pr_fatal("Triple fault happen -> shutdown!");
                 return -EFAULT;
         }
 
-        if (bitmap_test_and_clear_lock(ACRN_REQUEST_EPT_FLUSH, pending_req_bits))
+        if (bitmap_test_and_clear_lock(ACRN_REQUEST_EPT_FLUSH,
+                pending_req_bits)) {
                 invept(vcpu);
+        }
 
-        if (bitmap_test_and_clear_lock(ACRN_REQUEST_VPID_FLUSH, pending_req_bits))
+        if (bitmap_test_and_clear_lock(ACRN_REQUEST_VPID_FLUSH,
+                pending_req_bits)) {
                 flush_vpid_single(arch_vcpu->vpid);
+        }
 
-        if (bitmap_test_and_clear_lock(ACRN_REQUEST_TMR_UPDATE, pending_req_bits))
+        if (bitmap_test_and_clear_lock(ACRN_REQUEST_TMR_UPDATE,
+                pending_req_bits)) {
                 vioapic_update_tmr(vcpu);
+        }
 
         /* handling cancelled event injection when vcpu is switched out */
         if (arch_vcpu->inject_event_pending) {
@@ -423,8 +432,9 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
         /* SDM Vol 3 - table 6-2, inject high priority exception before
          * maskable hardware interrupt */
-        if (vcpu_inject_hi_exception(vcpu) != 0)
+        if (vcpu_inject_hi_exception(vcpu) != 0) {
                 goto INTR_WIN;
+        }
 
         /* inject NMI before maskable hardware interrupt */
         if (bitmap_test_and_clear_lock(ACRN_REQUEST_NMI, pending_req_bits)) {
@@ -485,8 +495,9 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
         }
 
         /* SDM Vol3 table 6-2, inject lowpri exception */
-        if (vcpu_inject_lo_exception(vcpu) != 0)
+        if (vcpu_inject_lo_exception(vcpu) != 0) {
                 goto INTR_WIN;
+        }
 
 INTR_WIN:
         /*
@@ -536,9 +547,10 @@ void cancel_event_injection(struct vcpu *vcpu)
         if ((intinfo & VMX_INT_INFO_VALID) != 0U) {
                 vcpu->arch_vcpu.inject_event_pending = true;
 
-                if ((intinfo & (EXCEPTION_ERROR_CODE_VALID << 8)) != 0U)
+                if ((intinfo & (EXCEPTION_ERROR_CODE_VALID << 8U)) != 0U) {
                         vcpu->arch_vcpu.inject_info.error_code =
                                 exec_vmread32(VMX_ENTRY_EXCEPTION_ERROR_CODE);
+                }
 
                 vcpu->arch_vcpu.inject_info.intr_info = intinfo;
                 exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, 0U);
@@ -557,8 +569,9 @@ int exception_vmexit_handler(struct vcpu *vcpu)
                 status = -EINVAL;
         }
 
-        if (status != 0)
+        if (status != 0) {
                 return status;
+        }
 
         pr_dbg(" Handling guest exception");
@@ -577,10 +590,11 @@ int exception_vmexit_handler(struct vcpu *vcpu)
                         cpl = exec_vmread32(VMX_GUEST_CS_ATTR);
                         cpl = (cpl >> 5U) & 3U;
 
-                        if (cpl < 3U)
+                        if (cpl < 3U) {
                                 int_err_code &= ~4U;
-                        else
+                        } else {
                                 int_err_code |= 4U;
+                        }
                 }
         }


@@ -173,8 +173,9 @@ int vmexit_handler(struct vcpu *vcpu)
                 uint32_t err_code = 0U;
 
                 if (type == VMX_INT_TYPE_HW_EXP) {
-                        if ((vector_info & VMX_INT_INFO_ERR_CODE_VALID) != 0U)
+                        if ((vector_info & VMX_INT_INFO_ERR_CODE_VALID) != 0U) {
                                 err_code = exec_vmread32(VMX_IDT_VEC_ERROR_CODE);
+                        }
                         (void)vcpu_queue_exception(vcpu, vector, err_code);
                         vcpu->arch_vcpu.idt_vectoring_info = 0U;
                 } else if (type == VMX_INT_TYPE_NMI) {


@@ -307,8 +307,9 @@ static void load_pdptrs(struct vcpu *vcpu)
 static bool is_cr0_write_valid(struct vcpu *vcpu, uint64_t cr0)
 {
         /* Shouldn't set always off bit */
-        if ((cr0 & cr0_always_off_mask) != 0UL)
+        if ((cr0 & cr0_always_off_mask) != 0UL) {
                 return false;
+        }
 
         /* SDM 25.3 "Changes to instruction behavior in VMX non-root"
          *
@@ -318,19 +319,22 @@ static bool is_cr0_write_valid(struct vcpu *vcpu, uint64_t cr0)
          * CR0.PE = 0 and CR0.PG = 1 is invalid.
          */
         if (((cr0 & CR0_PG) != 0UL) && !is_pae(vcpu)
-                && ((vcpu_get_efer(vcpu) & MSR_IA32_EFER_LME_BIT) != 0UL))
+                && ((vcpu_get_efer(vcpu) & MSR_IA32_EFER_LME_BIT) != 0UL)) {
                 return false;
+        }
 
-        if (((cr0 & CR0_PE) == 0UL) && ((cr0 & CR0_PG) != 0UL))
+        if (((cr0 & CR0_PE) == 0UL) && ((cr0 & CR0_PG) != 0UL)) {
                 return false;
+        }
 
         /* SDM 6.15 "Exception and Interrupt Refrerence" GP Exception
          *
          * Loading CR0 regsiter with a set NW flag and a clear CD flag
          * is invalid
          */
-        if (((cr0 & CR0_CD) == 0UL) && ((cr0 & CR0_NW) != 0UL))
+        if (((cr0 & CR0_CD) == 0UL) && ((cr0 & CR0_NW) != 0UL)) {
                 return false;
+        }
 
         return true;
 }
@@ -450,16 +454,19 @@ void vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
 static bool is_cr4_write_valid(struct vcpu *vcpu, uint64_t cr4)
 {
         /* Check if guest try to set fixed to 0 bits or reserved bits */
-        if ((cr4 & cr4_always_off_mask) != 0U)
+        if ((cr4 & cr4_always_off_mask) != 0U) {
                 return false;
+        }
 
         /* Do NOT support nested guest */
-        if ((cr4 & CR4_VMXE) != 0UL)
+        if ((cr4 & CR4_VMXE) != 0UL) {
                 return false;
+        }
 
         /* Do NOT support PCID in guest */
-        if ((cr4 & CR4_PCIDE) != 0UL)
+        if ((cr4 & CR4_PCIDE) != 0UL) {
                 return false;
+        }
 
         if (is_long_mode(vcpu)) {
                 if ((cr4 & CR4_PAE) == 0UL) {


@@ -229,8 +229,9 @@ dmar_wait_completion(struct dmar_drhd_rt *dmar_uint, uint32_t offset,
                  */
                 condition = (temp_condition == pre_condition) ? true : false;
 
-                if (condition)
+                if (condition) {
                         break;
+                }
                 ASSERT(((rdtsc() - start) < CYCLES_PER_MS),
                         "DMAR OP Timeout!");
                 asm volatile ("pause" ::: "memory");