diff --git a/hypervisor/common/hv_main.c b/hypervisor/common/hv_main.c
index 2473db8e6..a7b0f57f5 100644
--- a/hypervisor/common/hv_main.c
+++ b/hypervisor/common/hv_main.c
@@ -42,7 +42,7 @@ void vcpu_thread(struct vcpu *vcpu)
 			continue;
 		}
 
-		if (need_reschedule(vcpu->pcpu_id)) {
+		if (need_reschedule(vcpu->pcpu_id) != 0) {
 			/*
 			 * In extrem case, schedule() could return. Which
 			 * means the vcpu resume happens before schedule()
diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c
index 58b4678ea..1680ee936 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -33,7 +33,7 @@ int64_t hcall_get_api_version(struct vm *vm, uint64_t param)
 	version.major_version = HV_API_MAJOR_VERSION;
 	version.minor_version = HV_API_MINOR_VERSION;
 
-	if (copy_to_gpa(vm, &version, param, sizeof(version))) {
+	if (copy_to_gpa(vm, &version, param, sizeof(version)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
 		return -1;
 	}
@@ -45,7 +45,7 @@ static int handle_vpic_irqline(struct vm *vm, int irq, enum irq_mode mode)
 {
 	int ret = -1;
 
-	if (!vm)
+	if (vm == NULL)
 		return ret;
 
 	switch (mode) {
@@ -69,7 +69,7 @@ handle_vioapic_irqline(struct vm *vm, int irq, enum irq_mode mode)
 {
 	int ret = -1;
 
-	if (!vm)
+	if (vm == NULL)
 		return ret;
 
 	switch (mode) {
@@ -95,7 +95,7 @@ static int handle_virt_irqline(struct vm *vm, uint64_t target_vmid,
 	long intr_type;
 	struct vm *target_vm = get_vm_from_vmid(target_vmid);
 
-	if (!vm || !param)
+	if ((vm == NULL) || (param == NULL))
 		return -1;
 
 	intr_type = param->intr_type;
@@ -136,7 +136,7 @@ int64_t hcall_create_vm(struct vm *vm, uint64_t param)
 	struct vm_description vm_desc;
 
 	memset((void *)&cv, 0, sizeof(cv));
-	if (copy_from_gpa(vm, &cv, param, sizeof(cv))) {
+	if (copy_from_gpa(vm, &cv, param, sizeof(cv)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
 		return -1;
 	}
@@ -156,7 +156,7 @@ int64_t hcall_create_vm(struct vm *vm, uint64_t param)
 		ret = 0;
 	}
 
-	if (copy_to_gpa(vm, &cv.vmid, param, sizeof(cv.vmid))) {
+	if (copy_to_gpa(vm, &cv.vmid, param, sizeof(cv.vmid)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
 		return -1;
 	}
@@ -210,10 +210,10 @@ int64_t hcall_create_vcpu(struct vm *vm, uint64_t vmid, uint64_t param)
 
 	struct vm *target_vm = get_vm_from_vmid(vmid);
 
-	if (!target_vm || !param)
+	if ((target_vm == NULL) || (param == 0U))
 		return -1;
 
-	if (copy_from_gpa(vm, &cv, param, sizeof(cv))) {
+	if (copy_from_gpa(vm, &cv, param, sizeof(cv)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
 		return -1;
 	}
@@ -234,7 +234,7 @@ int64_t hcall_assert_irqline(struct vm *vm, uint64_t vmid, uint64_t param)
 	int64_t ret = 0;
 	struct acrn_irqline irqline;
 
-	if (copy_from_gpa(vm, &irqline, param, sizeof(irqline))) {
+	if (copy_from_gpa(vm, &irqline, param, sizeof(irqline)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
 		return -1;
 	}
@@ -248,7 +248,7 @@ int64_t hcall_deassert_irqline(struct vm *vm, uint64_t vmid, uint64_t param)
 	int64_t ret = 0;
 	struct acrn_irqline irqline;
 
-	if (copy_from_gpa(vm, &irqline, param, sizeof(irqline))) {
+	if (copy_from_gpa(vm, &irqline, param, sizeof(irqline)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
 		return -1;
 	}
@@ -262,7 +262,7 @@ int64_t hcall_pulse_irqline(struct vm *vm, uint64_t vmid, uint64_t param)
 	int64_t ret = 0;
 	struct acrn_irqline irqline;
 
-	if (copy_from_gpa(vm, &irqline, param, sizeof(irqline))) {
+	if (copy_from_gpa(vm, &irqline, param, sizeof(irqline)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
 		return -1;
 	}
@@ -281,7 +281,7 @@ int64_t hcall_inject_msi(struct vm *vm, uint64_t vmid, uint64_t param)
 		return -1;
 
 	memset((void *)&msi, 0, sizeof(msi));
-	if (copy_from_gpa(vm, &msi, param, sizeof(msi))) {
+	if (copy_from_gpa(vm, &msi, param, sizeof(msi)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
 		return -1;
 	}
@@ -302,7 +302,7 @@ int64_t hcall_set_ioreq_buffer(struct vm *vm, uint64_t vmid, uint64_t param)
 
 	memset((void *)&iobuf, 0, sizeof(iobuf));
 
-	if (copy_from_gpa(vm, &iobuf, param, sizeof(iobuf))) {
+	if (copy_from_gpa(vm, &iobuf, param, sizeof(iobuf)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
 		return -1;
 	}
@@ -363,7 +363,7 @@ int64_t hcall_notify_req_finish(uint64_t vmid, uint64_t vcpu_id)
 	struct vm *target_vm = get_vm_from_vmid(vmid);
 
 	/* make sure we have set req_buf */
-	if (!target_vm || target_vm->sw.io_shared_page == NULL) {
+	if ((target_vm == NULL) || target_vm->sw.io_shared_page == NULL) {
 		pr_err("%s, invalid parameter\n", __func__);
 		return -EINVAL;
 	}
@@ -381,7 +381,7 @@ int64_t hcall_notify_req_finish(uint64_t vmid, uint64_t vcpu_id)
 	req_buf = (union vhm_request_buffer *)target_vm->sw.io_shared_page;
 	req = req_buf->req_queue + vcpu_id;
 
-	if (req->valid &&
+	if ((req->valid != 0) &&
 		((req->processed == REQ_STATE_SUCCESS) ||
 		(req->processed == REQ_STATE_FAILED)))
 		complete_request(vcpu);
@@ -417,21 +417,21 @@ int64_t _set_vm_memmap(struct vm *vm, struct vm *target_vm,
 	attr = 0;
 	if (memmap->type != MAP_UNMAP) {
 		prot = (memmap->prot != 0) ? memmap->prot : memmap->prot_2;
-		if (prot & MEM_ACCESS_READ)
+		if ((prot & MEM_ACCESS_READ) != 0U)
 			attr |= MMU_MEM_ATTR_READ;
-		if (prot & MEM_ACCESS_WRITE)
+		if ((prot & MEM_ACCESS_WRITE) != 0U)
 			attr |= MMU_MEM_ATTR_WRITE;
-		if (prot & MEM_ACCESS_EXEC)
+		if ((prot & MEM_ACCESS_EXEC) != 0U)
 			attr |= MMU_MEM_ATTR_EXECUTE;
-		if (prot & MEM_TYPE_WB)
+		if ((prot & MEM_TYPE_WB) != 0U)
 			attr |= MMU_MEM_ATTR_WB_CACHE;
-		else if (prot & MEM_TYPE_WT)
+		else if ((prot & MEM_TYPE_WT) != 0U)
 			attr |= MMU_MEM_ATTR_WT_CACHE;
-		else if (prot & MEM_TYPE_UC)
+		else if ((prot & MEM_TYPE_UC) != 0U)
 			attr |= MMU_MEM_ATTR_UNCACHED;
-		else if (prot & MEM_TYPE_WC)
+		else if ((prot & MEM_TYPE_WC) != 0U)
 			attr |= MMU_MEM_ATTR_WC;
-		else if (prot & MEM_TYPE_WP)
+		else if ((prot & MEM_TYPE_WP) != 0U)
 			attr |= MMU_MEM_ATTR_WP;
 		else
 			attr |= MMU_MEM_ATTR_UNCACHED;
@@ -447,12 +447,12 @@ int64_t hcall_set_vm_memmap(struct vm *vm, uint64_t vmid, uint64_t param)
 	struct vm_set_memmap memmap;
 	struct vm *target_vm = get_vm_from_vmid(vmid);
 
-	if (!vm || !target_vm)
+	if ((vm == NULL) || (target_vm == NULL))
 		return -1;
 
 	memset((void *)&memmap, 0, sizeof(memmap));
 
-	if (copy_from_gpa(vm, &memmap, param, sizeof(memmap))) {
+	if (copy_from_gpa(vm, &memmap, param, sizeof(memmap)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
 		return -1;
 	}
@@ -485,7 +485,7 @@ int64_t hcall_set_vm_memmaps(struct vm *vm, uint64_t param)
 
 	memset((void *)&set_memmaps, 0, sizeof(set_memmaps));
 
-	if (copy_from_gpa(vm, &set_memmaps, param, sizeof(set_memmaps))) {
+	if (copy_from_gpa(vm, &set_memmaps, param, sizeof(set_memmaps)) != 0) {
 		pr_err("%s: Unable copy param from vm\n", __func__);
 		return -1;
 	}
@@ -524,7 +524,7 @@ int64_t hcall_remap_pci_msix(struct vm *vm, uint64_t vmid, uint64_t param)
 
 	memset((void *)&remap, 0, sizeof(remap));
 
-	if (copy_from_gpa(vm, &remap, param, sizeof(remap))) {
+	if (copy_from_gpa(vm, &remap, param, sizeof(remap)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
 		return -1;
 	}
@@ -543,7 +543,7 @@ int64_t hcall_remap_pci_msix(struct vm *vm, uint64_t vmid, uint64_t param)
 	remap.msi_data = info.pmsi_data;
 	remap.msi_addr = info.pmsi_addr;
 
-	if (copy_to_gpa(vm, &remap, param, sizeof(remap))) {
+	if (copy_to_gpa(vm, &remap, param, sizeof(remap)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
 		return -1;
 	}
@@ -563,12 +563,12 @@ int64_t hcall_gpa_to_hpa(struct vm *vm, uint64_t vmid, uint64_t param)
 
 	memset((void *)&v_gpa2hpa, 0, sizeof(v_gpa2hpa));
 
-	if (copy_from_gpa(vm, &v_gpa2hpa, param, sizeof(v_gpa2hpa))) {
+	if (copy_from_gpa(vm, &v_gpa2hpa, param, sizeof(v_gpa2hpa)) != 0) {
 		pr_err("HCALL gpa2hpa: Unable copy param from vm\n");
 		return -1;
 	}
 	v_gpa2hpa.hpa = gpa2hpa(target_vm, v_gpa2hpa.gpa);
-	if (copy_to_gpa(vm, &v_gpa2hpa, param, sizeof(v_gpa2hpa))) {
+	if (copy_to_gpa(vm, &v_gpa2hpa, param, sizeof(v_gpa2hpa)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
 		return -1;
 	}
@@ -587,14 +587,14 @@ int64_t hcall_assign_ptdev(struct vm *vm, uint64_t vmid, uint64_t param)
 		return -EINVAL;
 	}
 
-	if (copy_from_gpa(vm, &bdf, param, sizeof(bdf))) {
+	if (copy_from_gpa(vm, &bdf, param, sizeof(bdf)) != 0) {
 		pr_err("%s: Unable copy param from vm %d\n",
 			__func__, vm->attr.id);
 		return -EIO;
 	}
 
 	/* create a iommu domain for target VM if not created */
-	if (!target_vm->iommu_domain) {
+	if (target_vm->iommu_domain == NULL) {
 		if (target_vm->arch_vm.nworld_eptp == 0) {
 			pr_err("%s, EPT of VM not set!\n",
 				__func__, target_vm->attr.id);
@@ -622,7 +622,7 @@ int64_t hcall_deassign_ptdev(struct vm *vm, uint64_t vmid, uint64_t param)
 	if (target_vm == NULL)
 		return -1;
 
-	if (copy_from_gpa(vm, &bdf, param, sizeof(bdf))) {
+	if (copy_from_gpa(vm, &bdf, param, sizeof(bdf)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
 		return -1;
 	}
@@ -643,7 +643,7 @@ int64_t hcall_set_ptdev_intr_info(struct vm *vm, uint64_t vmid, uint64_t param)
 
 	memset((void *)&irq, 0, sizeof(irq));
 
-	if (copy_from_gpa(vm, &irq, param, sizeof(irq))) {
+	if (copy_from_gpa(vm, &irq, param, sizeof(irq)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
 		return -1;
 	}
@@ -673,7 +673,7 @@ hcall_reset_ptdev_intr_info(struct vm *vm, uint64_t vmid, uint64_t param)
 
 	memset((void *)&irq, 0, sizeof(irq));
 
-	if (copy_from_gpa(vm, &irq, param, sizeof(irq))) {
+	if (copy_from_gpa(vm, &irq, param, sizeof(irq)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
 		return -1;
 	}
@@ -697,12 +697,12 @@ int64_t hcall_setup_sbuf(struct vm *vm, uint64_t param)
 
 	memset((void *)&ssp, 0, sizeof(ssp));
 
-	if (copy_from_gpa(vm, &ssp, param, sizeof(ssp))) {
+	if (copy_from_gpa(vm, &ssp, param, sizeof(ssp)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
 		return -1;
 	}
 
-	if (ssp.gpa)
+	if (ssp.gpa != 0U)
 		hva = (uint64_t *)GPA2HVA(vm, ssp.gpa);
 	else
 		hva = (uint64_t *)NULL;
@@ -718,19 +718,19 @@ int64_t hcall_get_cpu_pm_state(struct vm *vm, uint64_t cmd, uint64_t param)
 	target_vm_id = (cmd & PMCMD_VMID_MASK) >> PMCMD_VMID_SHIFT;
 	target_vm = get_vm_from_vmid(target_vm_id);
 
-	if (!target_vm) {
+	if (target_vm == NULL) {
 		return -1;
 	}
 
 	switch (cmd & PMCMD_TYPE_MASK) {
 	case PMCMD_GET_PX_CNT: {
-		if (!target_vm->pm.px_cnt) {
+		if (target_vm->pm.px_cnt == 0U) {
 			return -1;
 		}
 
 		if (copy_to_gpa(vm, &(target_vm->pm.px_cnt), param,
-				sizeof(target_vm->pm.px_cnt))) {
+				sizeof(target_vm->pm.px_cnt)) != 0) {
 			pr_err("%s: Unable copy param to vm\n", __func__);
 			return -1;
 		}
@@ -744,7 +744,7 @@ int64_t hcall_get_cpu_pm_state(struct vm *vm, uint64_t cmd, uint64_t param)
 		 * If it is stored as per-cpu in the future,
 		 * we need to check PMCMD_VCPUID_MASK in cmd.
 		 */
-		if (!target_vm->pm.px_cnt) {
+		if (target_vm->pm.px_cnt == 0U) {
 			return -1;
 		}
 
@@ -755,7 +755,7 @@ int64_t hcall_get_cpu_pm_state(struct vm *vm, uint64_t cmd, uint64_t param)
 		px_data = target_vm->pm.px_data + pn;
 
 		if (copy_to_gpa(vm, px_data, param,
-				sizeof(struct cpu_px_data))) {
+				sizeof(struct cpu_px_data)) != 0) {
 			pr_err("%s: Unable copy param to vm\n", __func__);
 			return -1;
 		}
@@ -764,12 +764,12 @@ int64_t hcall_get_cpu_pm_state(struct vm *vm, uint64_t cmd, uint64_t param)
 	}
 
 	case PMCMD_GET_CX_CNT: {
-		if (!target_vm->pm.cx_cnt) {
+		if (target_vm->pm.cx_cnt == 0U) {
 			return -1;
 		}
 
 		if (copy_to_gpa(vm, &(target_vm->pm.cx_cnt), param,
-				sizeof(target_vm->pm.cx_cnt))) {
+				sizeof(target_vm->pm.cx_cnt)) != 0) {
 			pr_err("%s: Unable copy param to vm\n", __func__);
 			return -1;
 		}
@@ -779,19 +779,19 @@ int64_t hcall_get_cpu_pm_state(struct vm *vm, uint64_t cmd, uint64_t param)
 		uint8_t cx_idx;
 		struct cpu_cx_data *cx_data;
 
-		if (!target_vm->pm.cx_cnt) {
+		if (target_vm->pm.cx_cnt == 0) {
 			return -1;
 		}
 
 		cx_idx = (cmd & PMCMD_STATE_NUM_MASK) >> PMCMD_STATE_NUM_SHIFT;
-		if (!cx_idx || (cx_idx > target_vm->pm.cx_cnt)) {
+		if ((cx_idx == 0U) || (cx_idx > target_vm->pm.cx_cnt)) {
 			return -1;
 		}
 
 		cx_data = target_vm->pm.cx_data + cx_idx;
 
 		if (copy_to_gpa(vm, cx_data, param,
-				sizeof(struct cpu_cx_data))) {
+				sizeof(struct cpu_cx_data)) != 0) {
 			pr_err("%s: Unable copy param to vm\n", __func__);
 			return -1;
 		}
diff --git a/hypervisor/common/io_request.c b/hypervisor/common/io_request.c
index 5bafd55b9..8a69269a6 100644
--- a/hypervisor/common/io_request.c
+++ b/hypervisor/common/io_request.c
@@ -17,10 +17,10 @@ static void fire_vhm_interrupt(void)
 	struct vcpu *vcpu;
 
 	vm0 = get_vm_from_vmid(0);
-	ASSERT(vm0, "VM Pointer is NULL");
+	ASSERT(vm0 != NULL, "VM Pointer is NULL");
 
 	vcpu = vcpu_from_vid(vm0, 0);
-	ASSERT(vcpu, "vcpu_from_vid failed");
+	ASSERT(vcpu != NULL, "vcpu_from_vid failed");
 
 	vlapic_intr_edge(vcpu, VECTOR_VIRT_IRQ_VHM);
 }
@@ -64,7 +64,7 @@ int acrn_insert_request_wait(struct vcpu *vcpu, struct vhm_request *req)
 		"vhm_request page broken!");
 
-	if (!vcpu || !req || vcpu->vm->sw.io_shared_page == NULL)
+	if (vcpu == NULL || req == NULL || vcpu->vm->sw.io_shared_page == NULL)
 		return -EINVAL;
 
 	req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
@@ -165,10 +165,10 @@ void get_req_info(char *str, int str_max)
 	list_for_each(pos, &vm_list) {
 		vm = list_entry(pos, struct vm, list);
 		req_buf = (union vhm_request_buffer *)vm->sw.io_shared_page;
-		if (req_buf) {
+		if (req_buf != NULL) {
 			for (i = 0; i < VHM_REQUEST_MAX; i++) {
 				req = req_buf->req_queue + i;
-				if (req->valid) {
+				if (req->valid != 0) {
 					_get_req_info_(req, &client_id, type,
 						state, dir, &addr, &val);
 					len = snprintf(str, size,
diff --git a/hypervisor/common/ptdev.c b/hypervisor/common/ptdev.c
index a37895521..5ed2a7879 100644
--- a/hypervisor/common/ptdev.c
+++ b/hypervisor/common/ptdev.c
@@ -68,7 +68,7 @@ alloc_entry(struct vm *vm, enum ptdev_intr_type type)
 
 	/* allocate */
 	entry = calloc(1, sizeof(*entry));
-	ASSERT(entry, "alloc memory failed");
+	ASSERT(entry != NULL, "alloc memory failed");
 	entry->type = type;
 	entry->vm = vm;
diff --git a/hypervisor/common/schedule.c b/hypervisor/common/schedule.c
index 9cdbd008e..ea8ebd26d 100644
--- a/hypervisor/common/schedule.c
+++ b/hypervisor/common/schedule.c
@@ -154,9 +154,9 @@ void default_idle(void)
 	int pcpu_id = get_cpu_id();
 
 	while (1) {
-		if (need_reschedule(pcpu_id))
+		if (need_reschedule(pcpu_id) != 0)
 			schedule();
-		else if (need_offline(pcpu_id))
+		else if (need_offline(pcpu_id) != 0)
 			cpu_dead(pcpu_id);
 		else
 			__asm __volatile("pause" ::: "memory");
diff --git a/hypervisor/common/stack_protector.c b/hypervisor/common/stack_protector.c
index db4df70f1..bc7a3338c 100644
--- a/hypervisor/common/stack_protector.c
+++ b/hypervisor/common/stack_protector.c
@@ -8,5 +8,5 @@
 
 void __stack_chk_fail(void)
 {
-	ASSERT(0, "stack check fails in HV\n");
+	ASSERT(false, "stack check fails in HV\n");
 }
diff --git a/hypervisor/common/trusty_hypercall.c b/hypervisor/common/trusty_hypercall.c
index 231d87435..af35ed6a9 100644
--- a/hypervisor/common/trusty_hypercall.c
+++ b/hypervisor/common/trusty_hypercall.c
@@ -25,7 +25,7 @@ int64_t hcall_world_switch(struct vcpu *vcpu)
 		return -EPERM;
 	}
 
-	if (!vcpu->vm->arch_vm.sworld_eptp) {
+	if (vcpu->vm->arch_vm.sworld_eptp == 0U) {
 		pr_err("%s, Trusty is not initialized!\n", __func__);
 		return -EPERM;
 	}
@@ -44,7 +44,7 @@ int64_t hcall_initialize_trusty(struct vcpu *vcpu, uint64_t param)
 		return -EPERM;
 	}
 
-	if (vcpu->vm->arch_vm.sworld_eptp) {
+	if (vcpu->vm->arch_vm.sworld_eptp != 0U) {
 		pr_err("%s, Trusty already initialized!\n", __func__);
 		return -EPERM;
 	}
diff --git a/hypervisor/common/vm_load.c b/hypervisor/common/vm_load.c
index c59df9010..0ac3e18b7 100644
--- a/hypervisor/common/vm_load.c
+++ b/hypervisor/common/vm_load.c
@@ -43,7 +43,7 @@ static uint64_t create_zero_page(struct vm *vm)
 		&(hva->hdr), sizeof(hva->hdr));
 
 	/* See if kernel has a RAM disk */
-	if (sw_linux->ramdisk_src_addr) {
+	if (sw_linux->ramdisk_src_addr != NULL) {
 		/* Copy ramdisk load_addr and size in zeropage header structure
 		 */
 		zeropage->hdr.ramdisk_addr =
@@ -197,7 +197,7 @@ int general_sw_loader(struct vm *vm, struct vcpu *vcpu)
 #endif
 
 	/* Check if a RAM disk is present with Linux guest */
-	if (vm->sw.linux_info.ramdisk_src_addr) {
+	if (vm->sw.linux_info.ramdisk_src_addr != NULL) {
 		/* Get host-physical address for guest RAM disk */
 		hva = GPA2HVA(vm,
 			(uint64_t)vm->sw.linux_info.ramdisk_load_addr);