HV: treewide: terminate 'if .. else if' constructs with 'else'

MISRA C requires that an 'if' statement followed by one or more 'else if'
statements shall be terminated by an 'else' statement containing either a
side effect or a comment, to ensure that the conditions are considered
exhaustively.

Note that a simple 'if' statement is not required to be terminated by 'else'.

This patch fixes such violations by either refactoring the code or adding the
'else' statement with either a comment (describing why this case can be
skipped) or logging of the event. Logging alone may not be satisfactory for
release builds, where logging is a no-op, but properly handling these
non-trivial cases is out of the scope of this patch.

v1 -> v2:

    * Fix unintended semantic changes in add_(msix|intx)_remapping and
      io_instr_vmexit_handler.
    * Simplify boolean checks in vpic_ocw2.
    * Rephrase the comment in strtol_deci.

Signed-off-by: Junjie Mao <junjie.mao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>


@@ -316,20 +316,26 @@ add_msix_remapping(struct vm *vm, uint16_t virt_bdf, uint16_t phys_bdf,
entry->virt_bdf = virt_bdf;
entry->phys_bdf = phys_bdf;
entry->ptdev_intr_info.msi.msix_entry_index = msix_entry_index;
} else if ((entry->vm != vm) && is_vm0(entry->vm)) {
entry->vm = vm;
entry->virt_bdf = virt_bdf;
} else if ((entry->vm != vm) && !is_vm0(entry->vm)) {
pr_err("MSIX pbdf%x idx=%d already in vm%d with vbdf%x, not "
"able to add into vm%d with vbdf%x", entry->phys_bdf,
entry->ptdev_intr_info.msi.msix_entry_index,
entry->vm->attr.id,
entry->virt_bdf, vm->attr.id, virt_bdf);
ASSERT(false, "msix entry pbdf%x idx%d already in vm%d",
phys_bdf, msix_entry_index, entry->vm->attr.id);
} else if (entry->vm != vm) {
if (is_vm0(entry->vm)) {
entry->vm = vm;
entry->virt_bdf = virt_bdf;
} else {
pr_err("MSIX pbdf%x idx=%d already in vm%d with vbdf%x,"
" not able to add into vm%d with vbdf%x",
entry->phys_bdf,
entry->ptdev_intr_info.msi.msix_entry_index,
entry->vm->attr.id,
entry->virt_bdf, vm->attr.id, virt_bdf);
ASSERT(false, "msix entry pbdf%x idx%d already in vm%d",
phys_bdf, msix_entry_index, entry->vm->attr.id);
spinlock_release(&ptdev_lock);
return &invalid_entry;
spinlock_release(&ptdev_lock);
return &invalid_entry;
}
} else {
/* The mapping has already been added to the VM. No action
* required. */
}
spinlock_release(&ptdev_lock);
@@ -394,22 +400,27 @@ add_intx_remapping(struct vm *vm, uint8_t virt_pin,
entry->ptdev_intr_info.intx.phys_pin = phys_pin;
entry->ptdev_intr_info.intx.virt_pin = virt_pin;
entry->ptdev_intr_info.intx.vpin_src = vpin_src;
} else if ((entry->vm != vm) && is_vm0(entry->vm)) {
entry->vm = vm;
entry->ptdev_intr_info.intx.virt_pin = virt_pin;
entry->ptdev_intr_info.intx.vpin_src = vpin_src;
} else if ((entry->vm != vm) && !is_vm0(entry->vm)) {
pr_err("INTX pin%d already in vm%d with vpin%d, not able to "
"add into vm%d with vpin%d",
entry->ptdev_intr_info.intx.phys_pin,
entry->vm->attr.id,
entry->ptdev_intr_info.intx.virt_pin,
vm->attr.id, virt_pin);
ASSERT(false, "intx entry pin%d already vm%d",
phys_pin, entry->vm->attr.id);
} else if (entry->vm != vm) {
if (is_vm0(entry->vm)) {
entry->vm = vm;
entry->ptdev_intr_info.intx.virt_pin = virt_pin;
entry->ptdev_intr_info.intx.vpin_src = vpin_src;
} else {
pr_err("INTX pin%d already in vm%d with vpin%d,"
" not able to add into vm%d with vpin%d",
entry->ptdev_intr_info.intx.phys_pin,
entry->vm->attr.id,
entry->ptdev_intr_info.intx.virt_pin,
vm->attr.id, virt_pin);
ASSERT(false, "intx entry pin%d already vm%d",
phys_pin, entry->vm->attr.id);
spinlock_release(&ptdev_lock);
return &invalid_entry;
spinlock_release(&ptdev_lock);
return &invalid_entry;
}
} else {
/* The mapping has already been added to the VM. No action
* required. */
}
spinlock_release(&ptdev_lock);
@@ -600,7 +611,7 @@ void ptdev_intx_ack(struct vm *vm, uint8_t virt_pin,
int ptdev_msix_remap(struct vm *vm, uint16_t virt_bdf,
struct ptdev_msi_info *info)
{
struct ptdev_remapping_info *entry = NULL;
struct ptdev_remapping_info *entry;
bool lowpri = !is_vm0(vm);
/*


@@ -24,15 +24,15 @@ static inline struct vcpuid_entry *find_vcpuid_entry(struct vcpu *vcpu,
if (tmp->leaf < leaf) {
continue;
}
if (tmp->leaf == leaf) {
} else if (tmp->leaf == leaf) {
if ((tmp->flags & CPUID_CHECK_SUBLEAF) != 0U &&
(tmp->subleaf != subleaf)) {
continue;
}
entry = tmp;
break;
} else if (tmp->leaf > leaf) {
} else {
/* tmp->leaf > leaf */
break;
}
}


@@ -1328,10 +1328,10 @@ emulate_stack_op(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie,
err_code |= PAGE_FAULT_WR_FLAG;
}
error = gva2gpa(vcpu, stack_gla, &stack_gpa, &err_code);
if (error == -EFAULT) {
vcpu_inject_pf(vcpu, stack_gla, err_code);
return error;
} else if (error < 0) {
if (error < 0) {
if (error == -EFAULT) {
vcpu_inject_pf(vcpu, stack_gla, err_code);
}
return error;
}
if (pushop != 0) {
@@ -1728,10 +1728,10 @@ vie_init(struct vie *vie, struct vcpu *vcpu)
err_code = PAGE_FAULT_ID_FLAG;
ret = copy_from_gva(vcpu, vie->inst, guest_rip_gva,
inst_len, &err_code);
if (ret == -EFAULT) {
vcpu_inject_pf(vcpu, guest_rip_gva, err_code);
return ret;
} else if (ret < 0) {
if (ret < 0) {
if (ret == -EFAULT) {
vcpu_inject_pf(vcpu, guest_rip_gva, err_code);
}
return ret;
}
@@ -2096,6 +2096,8 @@ decode_immediate(struct vie *vie)
}
} else if ((vie->op.op_flags & VIE_OP_F_IMM8) != 0U) {
vie->imm_bytes = 1U;
} else {
/* No op_flag on immediate operand size */
}
n = vie->imm_bytes;


@@ -39,10 +39,10 @@ void acrn_update_ucode(struct vcpu *vcpu, uint64_t v)
err_code = 0U;
err = copy_from_gva(vcpu, &uhdr, gva, sizeof(uhdr), &err_code);
if (err == -EFAULT) {
vcpu_inject_pf(vcpu, gva, err_code);
return;
} else if (err < 0) {
if (err < 0) {
if (err == -EFAULT) {
vcpu_inject_pf(vcpu, gva, err_code);
}
return;
}
@@ -57,10 +57,10 @@ void acrn_update_ucode(struct vcpu *vcpu, uint64_t v)
err_code = 0U;
err = copy_from_gva(vcpu, ucode_ptr, gva, data_size, &err_code);
if (err == -EFAULT) {
vcpu_inject_pf(vcpu, gva, err_code);
return;
} else if (err < 0) {
if (err < 0) {
if (err == -EFAULT) {
vcpu_inject_pf(vcpu, gva, err_code);
}
return;
}


@@ -366,6 +366,9 @@ vioapic_write(struct vioapic *vioapic, uint32_t addr, uint32_t data)
dev_dbg(ACRN_DBG_IOAPIC,
"vpic wire mode -> INTR");
}
} else {
/* Can never happen since IOAPIC_RTE_INTMASK
* is changed. */
}
}
vioapic->rtbl[pin] = new;
@@ -617,6 +620,8 @@ int vioapic_mmio_access_handler(struct vcpu *vcpu, struct mem_io *mmio,
data);
mmio->mmio_status = MMIO_TRANS_VALID;
} else {
/* Can never happen due to the range of read_write. */
}
} else {
pr_err("All RW to IOAPIC must be 32-bits in size");


@@ -626,9 +626,13 @@ vlapic_lvt_write_handler(struct vlapic *vlapic, uint32_t offset)
dev_dbg(ACRN_DBG_LAPIC,
"vpic wire mode -> NULL");
}
} else {
/* APIC_LVT_M unchanged. No action required. */
}
} else if (offset == APIC_OFFSET_TIMER_LVT) {
vlapic_update_lvtt(vlapic, val);
} else {
/* No action required. */
}
*lvtptr = val;
@@ -992,6 +996,8 @@ vlapic_calcdest(struct vm *vm, uint64_t *dmask, uint32_t dest,
} else if (target->apic_page->ppr >
vlapic->apic_page->ppr) {
target = vlapic;
} else {
/* target is the dest */
}
} else {
bitmap_set(vcpu_id, dmask);
@@ -1168,6 +1174,8 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic)
target_vcpu->vcpu_id,
target_vcpu->vm->attr.id);
schedule_vcpu(target_vcpu);
} else {
pr_err("Unhandled icrlo write with mode %u\n", mode);
}
}
@@ -2067,6 +2075,8 @@ int vlapic_mmio_access_handler(struct vcpu *vcpu, struct mem_io *mmio,
mmio->access_size);
mmio->mmio_status = MMIO_TRANS_VALID;
} else {
/* Can never happen due to the range of mmio->read_write. */
}
return ret;
@@ -2387,6 +2397,9 @@ int apic_access_vmexit_handler(struct vcpu *vcpu)
return err;
}
err = emulate_instruction(vcpu);
} else {
pr_err("Unhandled APIC access type: %lu\n", access_type);
err = -EINVAL;
}
TRACE_2L(TRACE_VMEXIT_APICV_ACCESS, qual, (uint64_t)vlapic);


@@ -426,9 +426,11 @@ static int vpic_ocw2(struct vpic *vpic, struct pic *pic, uint8_t val)
master_pic(vpic, pic) ? isr_bit : isr_bit + 8U,
PTDEV_VPIN_PIC);
}
} else if ((val & OCW2_SL) != 0 && pic->rotate == true) {
} else if ((val & OCW2_SL) != 0U && pic->rotate) {
/* specific priority */
pic->lowprio = val & 0x7U;
} else {
/* TODO: Any action required in this case? */
}
return 0;


@@ -76,6 +76,14 @@ int io_instr_vmexit_handler(struct vcpu *vcpu)
TRACE_4I(TRACE_VMEXIT_IO_INSTRUCTION, port, (uint32_t)direction, sz,
(uint32_t)cur_context_idx);
/*
* Post-conditions of the loop:
*
* status == 0 : The access has been handled properly.
* status == -EIO : The access spans multiple devices and cannot
* be handled.
* status == -EINVAL : No valid handler found for this access.
*/
for (handler = vm->arch_vm.io_handler;
handler; handler = handler->next) {
@@ -86,45 +94,45 @@ int io_instr_vmexit_handler(struct vcpu *vcpu)
<= (handler->desc.addr + handler->desc.len)))) {
pr_fatal("Err:IO, port 0x%04x, size=%u spans devices",
port, sz);
return -EIO;
}
if (direction == 0) {
handler->desc.io_write(handler, vm, port, sz,
cur_context->guest_cpu_regs.regs.rax);
pr_dbg("IO write on port %04x, data %08x", port,
cur_context->guest_cpu_regs.regs.rax & mask);
status = 0;
status = -EIO;
break;
} else {
uint32_t data = handler->desc.io_read(handler, vm,
port, sz);
struct cpu_regs *regs =
&cur_context->guest_cpu_regs.regs;
cur_context->guest_cpu_regs.regs.rax &= ~mask;
cur_context->guest_cpu_regs.regs.rax |= data & mask;
if (direction == 0) {
handler->desc.io_write(handler, vm, port, sz,
regs->rax);
pr_dbg("IO read on port %04x, data %08x", port, data);
pr_dbg("IO write on port %04x, data %08x", port,
regs->rax & mask);
} else {
uint32_t data = handler->desc.io_read(handler,
vm, port, sz);
regs->rax &= ~mask;
regs->rax |= data & mask;
pr_dbg("IO read on port %04x, data %08x",
port, data);
}
status = 0;
break;
}
}
/* Go for VHM */
if (status != 0) {
if (status == -EINVAL) {
uint64_t *rax = &cur_context->guest_cpu_regs.regs.rax;
(void)memset(&vcpu->req, 0, sizeof(struct vhm_request));
dm_emulate_pio_pre(vcpu, exit_qual, sz, *rax);
status = acrn_insert_request_wait(vcpu, &vcpu->req);
}
if (status != 0) {
pr_fatal("Err:IO %s access to port 0x%04x, size=%u",
(direction != 0) ? "read" : "write", port, sz);
if (status != 0) {
pr_fatal("Err:IO %s access to port 0x%04x, size=%u",
(direction != 0) ? "read" : "write", port, sz);
}
}
return status;


@@ -188,6 +188,8 @@ void invept(struct vcpu *vcpu)
}
} else if (cpu_has_vmx_ept_cap(VMX_EPT_INVEPT_GLOBAL_CONTEXT)) {
_invept(INVEPT_TYPE_ALL_CONTEXTS, desc);
} else {
/* Neither type of INVEPT is supported. Skip. */
}
}


@@ -230,6 +230,9 @@ int vcpu_queue_exception(struct vcpu *vcpu, uint32_t vector,
/* generate double fault */
vector = IDT_DF;
err_code = 0U;
} else {
/* Trigger the given exception instead of override it with
* double/triple fault. */
}
vcpu->arch_vcpu.exception_info.exception = vector;


@@ -157,6 +157,8 @@ int vmexit_handler(struct vcpu *vcpu)
} else if (type == VMX_INT_TYPE_NMI) {
vcpu_make_request(vcpu, ACRN_REQUEST_NMI);
vcpu->arch_vcpu.idt_vectoring_info = 0U;
} else {
/* No action on EXT_INT or SW exception. */
}
}


@@ -408,6 +408,8 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
context->ia32_efer &= ~MSR_IA32_EFER_LMA_BIT;
exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, context->ia32_efer);
} else {
/* CR0.PG unchanged. */
}
/* If CR0.CD or CR0.NW get changed */
@@ -586,6 +588,8 @@ static void init_guest_state(struct vcpu *vcpu)
vmx_write_cr4(vcpu, CR4_PSE | CR4_PAE | CR4_MCE);
vmx_write_cr3(vcpu, vm->arch_vm.guest_init_pml4 | CR3_PWT);
vmx_write_cr0(vcpu, CR0_PG | CR0_PE | CR0_NE);
} else {
/* vcpu_mode will never be CPU_MODE_COMPATIBILITY */
}
/***************************************************/
@@ -732,6 +736,8 @@ static void init_guest_state(struct vcpu *vcpu)
/* Limit */
limit = HOST_GDT_SIZE - 1U;
} else {
/* vcpu_mode will never be CPU_MODE_COMPATIBILITY */
}
/* GDTR Base */
@@ -766,6 +772,8 @@ static void init_guest_state(struct vcpu *vcpu)
/* Base */
base = idtb.base;
} else {
/* vcpu_mode will never be CPU_MODE_COMPATIBILITY */
}
/* IDTR Base */
@@ -814,6 +822,8 @@ static void init_guest_state(struct vcpu *vcpu)
asm volatile ("movw %%fs, %%ax":"=a" (fs));
asm volatile ("movw %%gs, %%ax":"=a" (gs));
limit = 0xffffffffU;
} else {
/* vcpu_mode will never be CPU_MODE_COMPATIBILITY */
}
/* Selector */


@@ -681,6 +681,9 @@ int64_t hcall_set_ptdev_intr_info(struct vm *vm, uint64_t vmid, uint64_t param)
ret = ptdev_add_msix_remapping(target_vm,
irq.virt_bdf, irq.phys_bdf,
irq.is.msix.vector_cnt);
} else {
pr_err("%s: Invalid irq type: %u\n", __func__, irq.type);
ret = -1;
}
return ret;
@@ -712,6 +715,9 @@ hcall_reset_ptdev_intr_info(struct vm *vm, uint64_t vmid, uint64_t param)
ptdev_remove_msix_remapping(target_vm,
irq.virt_bdf,
irq.is.msix.vector_cnt);
} else {
pr_err("%s: Invalid irq type: %u\n", __func__, irq.type);
ret = -1;
}
return ret;


@@ -161,8 +161,8 @@ static const char *get_flags(const char *s, int *flags)
static const char *get_length_modifier(const char *s,
int *flags, uint64_t *mask)
{
/* check for h[h] (char/short) */
if (*s == 'h') {
/* check for h[h] (char/short) */
s++;
if (*s == 'h') {
*flags |= PRINT_FLAG_CHAR;
@@ -173,7 +173,7 @@ static const char *get_length_modifier(const char *s,
*mask = 0x0000FFFF;
}
} else if (*s == 'l') {
/* check for l[l] (long/long long) */
/* check for l[l] (long/long long) */
s++;
if (*s == 'l') {
*flags |= PRINT_FLAG_LONG_LONG;
@@ -181,6 +181,8 @@ static const char *get_length_modifier(const char *s,
} else {
*flags |= PRINT_FLAG_LONG;
}
} else {
/* No length modifiers found. */
}
return s;
@@ -375,6 +377,8 @@ static int print_decimal(struct print_param *param, int64_t value)
} else if ((param->vars.flags & PRINT_FLAG_SPACE) != 0) {
param->vars.prefix = " ";
param->vars.prefixlen = 1;
} else {
/* No prefix specified. */
}
}
@@ -662,6 +666,8 @@ static int charmem(int cmd, const char *s, int sz, void *hnd)
s++;
n++;
}
} else {
/* sz == 0, no copy needed. */
}
param->wrtn += n;


@@ -37,7 +37,10 @@ long strtol_deci(const char *nptr)
} else if (c == '+') {
c = *s;
s++;
} else {
/* No sign character. */
}
/*
* Compute the cutoff value between legal numbers and illegal
* numbers. That is the largest legal value, divided by the
@@ -63,8 +66,7 @@ long strtol_deci(const char *nptr)
do {
if (c >= '0' && c <= '9') {
c -= '0';
}
else {
} else {
break;
}
if (c >= base) {
@@ -72,8 +74,7 @@ long strtol_deci(const char *nptr)
}
if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim)) {
any = -1;
}
else {
} else {
any = 1;
acc *= base;
acc += c;
@@ -87,6 +88,9 @@ long strtol_deci(const char *nptr)
acc = (neg != 0) ? LONG_MIN : LONG_MAX;
} else if (neg != 0) {
acc = -acc;
} else {
/* There is no overflow and no leading '-' exists. In such case
* acc already holds the right number. No action required. */
}
return acc;
}