diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c
index 6fdeefc31..81d82d086 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -374,8 +374,8 @@ void bsp_boot_init(void)
 	/* Build time sanity checks to make sure hard-coded offset
 	 * is matching the actual offset!
 	 */
-	ASSERT(sizeof(struct trusty_startup_param)
-			+ sizeof(struct key_info) < 0x1000U,
+	ASSERT((sizeof(struct trusty_startup_param)
+			+ sizeof(struct key_info)) < 0x1000U,
 		"trusty_startup_param + key_info > 1Page size(4KB)!");
 
 	ASSERT(NR_WORLD == 2, "Only 2 Worlds supported!");
diff --git a/hypervisor/arch/x86/ept.c b/hypervisor/arch/x86/ept.c
index 8808b8d7a..edf0fd315 100644
--- a/hypervisor/arch/x86/ept.c
+++ b/hypervisor/arch/x86/ept.c
@@ -400,13 +400,13 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu)
 
 	list_for_each(pos, &vcpu->vm->mmio_list) {
 		mmio_handler = list_entry(pos, struct mem_io_node, list);
-		if ((mmio->paddr + mmio->access_size <=
+		if (((mmio->paddr + mmio->access_size) <=
 			mmio_handler->range_start) ||
 			(mmio->paddr >= mmio_handler->range_end)) {
 			continue;
 		} else if (!((mmio->paddr >= mmio_handler->range_start) &&
-			(mmio->paddr + mmio->access_size <=
+			((mmio->paddr + mmio->access_size) <=
 			mmio_handler->range_end))) {
 			pr_fatal("Err MMIO, addr:0x%llx, size:%x",
 				mmio->paddr, mmio->access_size);
diff --git a/hypervisor/arch/x86/guest/guest.c b/hypervisor/arch/x86/guest/guest.c
index 0af73aa5f..74f6cc3b1 100644
--- a/hypervisor/arch/x86/guest/guest.c
+++ b/hypervisor/arch/x86/guest/guest.c
@@ -162,15 +162,15 @@ static int _gva2gpa_common(struct vcpu *vcpu, struct page_walk_info *pw_info,
 			goto out;
 		}
 
-		shift = i * pw_info->width + 12U;
+		shift = (i * pw_info->width) + 12U;
 		index = (gva >> shift) & ((1UL << pw_info->width) - 1UL);
 		page_size = 1UL << shift;
 
 		if (pw_info->width == 10U) {
 			/* 32bit entry */
-			entry = *((uint32_t *)(base + 4U * index));
+			entry = *((uint32_t *)(base + (4U * index)));
 		} else {
-			entry = *((uint64_t *)(base + 8U * index));
+			entry = *((uint64_t *)(base + (8U * index)));
 		}
 
 		/* check if the entry present */
@@ -501,7 +501,7 @@ void obtain_e820_mem_info(void)
 			e820_mem.mem_bottom = entry->baseaddr;
 		}
 
-		if (entry->baseaddr + entry->length
+		if ((entry->baseaddr + entry->length)
 				> e820_mem.mem_top) {
 			e820_mem.mem_top = entry->baseaddr +
 				entry->length;
@@ -664,7 +664,7 @@ uint64_t e820_alloc_low_memory(uint32_t size)
 
 		/* Search for available low memory */
 		if ((entry->type != E820_TYPE_RAM)
 			|| (length < size)
-			|| (start + size > MEM_1M)) {
+			|| ((start + size) > MEM_1M)) {
 			continue;
 		}
diff --git a/hypervisor/arch/x86/guest/instr_emul.c b/hypervisor/arch/x86/guest/instr_emul.c
index cb7331cac..e5646c726 100644
--- a/hypervisor/arch/x86/guest/instr_emul.c
+++ b/hypervisor/arch/x86/guest/instr_emul.c
@@ -1459,7 +1459,7 @@ emulate_bittest(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
 	 * Intel SDM, Vol 2, Table 3-2:
 	 * "Range of Bit Positions Specified by Bit Offset Operands"
 	 */
-	bitmask = (uint64_t)vie->opsize * 8UL - 1UL;
+	bitmask = ((uint64_t)vie->opsize * 8UL) - 1UL;
 	bitoff = (uint64_t)vie->immediate & bitmask;
 
 	/* Copy the bit into the Carry flag in %rflags */
diff --git a/hypervisor/arch/x86/guest/vioapic.c b/hypervisor/arch/x86/guest/vioapic.c
index 638a15678..97e0d485d 100644
--- a/hypervisor/arch/x86/guest/vioapic.c
+++ b/hypervisor/arch/x86/guest/vioapic.c
@@ -248,8 +248,8 @@ vioapic_read(struct vioapic *vioapic, uint32_t addr)
 	}
 
 	/* redirection table entries */
-	if (regnum >= IOAPIC_REDTBL &&
-	    (regnum < IOAPIC_REDTBL + (uint32_t)pincount * 2U) != 0) {
+	if ((regnum >= IOAPIC_REDTBL) &&
+	    (regnum < (IOAPIC_REDTBL + ((uint32_t)pincount * 2U)))) {
 		uint32_t addr_offset = regnum - IOAPIC_REDTBL;
 		uint32_t rte_offset = addr_offset / 2U;
 		pin = (uint8_t)rte_offset;
@@ -323,8 +323,8 @@ vioapic_write(struct vioapic *vioapic, uint32_t addr, uint32_t data)
 	}
 
 	/* redirection table entries */
-	if (regnum >= IOAPIC_REDTBL &&
-	    (regnum < IOAPIC_REDTBL + (uint32_t)pincount * 2U) != 0U) {
+	if ((regnum >= IOAPIC_REDTBL) &&
+	    (regnum < (IOAPIC_REDTBL + ((uint32_t)pincount * 2U)))) {
 		uint32_t addr_offset = regnum - IOAPIC_REDTBL;
 		uint32_t rte_offset = addr_offset / 2U;
 		pin = (uint8_t)rte_offset;
diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c
index 0237390aa..12e5422b8 100644
--- a/hypervisor/arch/x86/guest/vlapic.c
+++ b/hypervisor/arch/x86/guest/vlapic.c
@@ -814,7 +814,7 @@ vlapic_process_eoi(struct vlapic *vlapic)
 					vlapic->isrvec_stk_top);
 			}
 			isrptr[i].val &= ~(1U << bitpos);
-			vector = i * 32U + bitpos;
+			vector = (i * 32U) + bitpos;
 			dev_dbg(ACRN_DBG_LAPIC, "EOI vector %u", vector);
 			vlapic_dump_isr(vlapic, "vlapic_process_eoi");
 			vlapic->isrvec_stk_top--;
@@ -1201,7 +1201,7 @@ vlapic_pending_intr(struct vlapic *vlapic, uint32_t *vecptr)
 		val = atomic_load32(&irrptr[i].val);
 		bitpos = (uint32_t)fls32(val);
 		if (bitpos != INVALID_BIT_INDEX) {
-			vector = i * 32U + bitpos;
+			vector = (i * 32U) + bitpos;
 			if (PRIO(vector) > PRIO(lapic->ppr)) {
 				if (vecptr != NULL) {
 					*vecptr = vector;
@@ -1578,7 +1578,7 @@ vlapic_reset(struct vlapic *vlapic)
 
 	vlapic->svr_last = lapic->svr;
 
-	for (i = 0U; i < VLAPIC_MAXLVT_INDEX + 1U; i++) {
+	for (i = 0U; i < (VLAPIC_MAXLVT_INDEX + 1U); i++) {
 		vlapic->lvt_last[i] = 0U;
 	}
 
@@ -2258,7 +2258,7 @@ apicv_batch_set_tmr(struct vlapic *vlapic)
 	e = 256U;
 
 	while (s < e) {
-		val = ptr[s/TMR_STEP_LEN + 1].val;
+		val = ptr[(s/TMR_STEP_LEN) + 1].val;
 		val <<= TMR_STEP_LEN;
 		val |= ptr[s/TMR_STEP_LEN].val;
 		exec_vmwrite64(VMX_EOI_EXIT(s), val);
diff --git a/hypervisor/arch/x86/io.c b/hypervisor/arch/x86/io.c
index 2aee1e4d8..1edaae9a6 100644
--- a/hypervisor/arch/x86/io.c
+++ b/hypervisor/arch/x86/io.c
@@ -12,7 +12,7 @@ int dm_emulate_pio_post(struct vcpu *vcpu)
 	int cur_context = vcpu->arch_vcpu.cur_context;
 	union vhm_request_buffer *req_buf = NULL;
 	uint32_t mask =
-		0xFFFFFFFFUL >> (32 - 8 * vcpu->req.reqs.pio_request.size);
+		0xFFFFFFFFUL >> (32U - (8U * vcpu->req.reqs.pio_request.size));
 	uint64_t *rax;
 
 	req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
@@ -71,7 +71,7 @@ int io_instr_vmexit_handler(struct vcpu *vcpu)
 	sz = VM_EXIT_IO_INSTRUCTION_SIZE(exit_qual) + 1;
 	port = VM_EXIT_IO_INSTRUCTION_PORT_NUMBER(exit_qual);
 	direction = VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual);
-	mask = 0xfffffffful >> (32 - 8 * sz);
+	mask = 0xfffffffful >> (32U - (8U * sz));
 
 	TRACE_4I(TRACE_VMEXIT_IO_INSTRUCTION, port, (uint32_t)direction, sz,
 		(uint32_t)cur_context_idx);
@@ -87,8 +87,8 @@ int io_instr_vmexit_handler(struct vcpu *vcpu)
 	for (handler = vm->arch_vm.io_handler;
 			handler; handler = handler->next) {
-		if ((port >= handler->desc.addr + handler->desc.len) ||
-				(port + sz <= handler->desc.addr)) {
+		if ((port >= (handler->desc.addr + handler->desc.len)) ||
+				((port + sz) <= handler->desc.addr)) {
 			continue;
 		} else if (!((port >= handler->desc.addr) &&
 				((port + sz) <=
 				(handler->desc.addr + handler->desc.len)))) {
diff --git a/hypervisor/arch/x86/ioapic.c b/hypervisor/arch/x86/ioapic.c
index 4e5836588..fc478fe75 100644
--- a/hypervisor/arch/x86/ioapic.c
+++ b/hypervisor/arch/x86/ioapic.c
@@ -146,7 +146,7 @@ static inline void
 ioapic_get_rte_entry(void *ioapic_addr,
 		uint8_t pin, union ioapic_rte *rte)
 {
-	uint32_t rte_addr = (uint32_t)pin * 2U + 0x10U;
+	uint32_t rte_addr = ((uint32_t)pin * 2U) + 0x10U;
 	rte->u.lo_32 = ioapic_read_reg32(ioapic_addr, rte_addr);
 	rte->u.hi_32 = ioapic_read_reg32(ioapic_addr, rte_addr + 1U);
 }
@@ -155,7 +155,7 @@ static inline void
 ioapic_set_rte_entry(void *ioapic_addr,
 		uint8_t pin, union ioapic_rte rte)
 {
-	uint32_t rte_addr = (uint32_t)pin * 2U + 0x10U;
+	uint32_t rte_addr = ((uint32_t)pin * 2U) + 0x10U;
 	ioapic_write_reg32(ioapic_addr, rte_addr, rte.u.lo_32);
 	ioapic_write_reg32(ioapic_addr, rte_addr + 1U, rte.u.hi_32);
 }
diff --git a/hypervisor/arch/x86/timer.c b/hypervisor/arch/x86/timer.c
index 1389b3ca6..93a357472 100644
--- a/hypervisor/arch/x86/timer.c
+++ b/hypervisor/arch/x86/timer.c
@@ -241,13 +241,13 @@ static uint64_t pit_calibrate_tsc(uint16_t cal_ms)
 	uint16_t max_cal_ms;
 	uint64_t current_tsc;
 
-	max_cal_ms = (PIT_MAX_COUNT - PIT_TARGET) * 1000U / PIT_TICK_RATE;
+	max_cal_ms = ((PIT_MAX_COUNT - PIT_TARGET) * 1000U) / PIT_TICK_RATE;
 	cal_ms = min(cal_ms, max_cal_ms);
 
 	/* Assume the 8254 delivers 18.2 ticks per second when 16 bits fully
 	 * wrap. This is about 1.193MHz or a clock period of 0.8384uSec
 	 */
-	initial_pit = (uint16_t)(cal_ms * PIT_TICK_RATE / 1000U);
+	initial_pit = (uint16_t)((cal_ms * PIT_TICK_RATE) / 1000U);
 	initial_pit += PIT_TARGET;
 
 	/* Port 0x43 ==> Control word write; Data 0x30 ==> Select Counter 0,
@@ -273,7 +273,7 @@ static uint64_t pit_calibrate_tsc(uint16_t cal_ms)
 
 	current_tsc = rdtsc() - current_tsc;
 
-	return current_tsc / cal_ms * 1000U;
+	return (current_tsc / cal_ms) * 1000U;
 }
 
 /*
@@ -288,8 +288,8 @@ static uint64_t native_calibrate_tsc(void)
 			&ecx_hz, &reserved);
 
 		if (eax_denominator != 0U && ebx_numerator != 0U) {
-			return (uint64_t) ecx_hz *
-				ebx_numerator / eax_denominator;
+			return ((uint64_t) ecx_hz *
+				ebx_numerator) / eax_denominator;
 		}
 	}
diff --git a/hypervisor/arch/x86/vtd.c b/hypervisor/arch/x86/vtd.c
index a219622d0..28615ddee 100644
--- a/hypervisor/arch/x86/vtd.c
+++ b/hypervisor/arch/x86/vtd.c
@@ -93,7 +93,7 @@ dmar_set_bitslice(uint64_t var, uint64_t mask,
 		status = iommu_read32(dmar_uint, offset); \
 		if (condition) \
 			break; \
-		ASSERT((rdtsc() - start < CYCLES_PER_MS), \
+		ASSERT(((rdtsc() - start) < CYCLES_PER_MS), \
 			"DMAR OP Timeout!"); \
 		asm volatile ("pause" ::: "memory"); \
 	} \
@@ -781,7 +781,7 @@ static int dmar_fault_handler(int irq, void *data)
 		loop++;
 		index = dma_fsts_fri(fsr);
 		record_reg_offset = (uint32_t)dmar_uint->cap_fault_reg_offset
-			+ index * 16U;
+			+ (index * 16U);
 		if (index >= dmar_uint->cap_num_fault_regs) {
 			dev_dbg(ACRN_DBG_IOMMU, "%s: invalid FR Index",
 				__func__);
@@ -1221,7 +1221,7 @@ void suspend_iommu(void)
 		for (i = 0U; i < IOMMU_FAULT_REGISTER_STATE_NUM; i++) {
 			iommu_fault_state[iommu_idx][i] =
 				iommu_read32(dmar_unit, DMAR_FECTL_REG +
-					i * IOMMU_FAULT_REGISTER_STATE_NUM);
+					(i * IOMMU_FAULT_REGISTER_STATE_NUM));
 		}
 		/* disable translation */
 		dmar_disable_translation(dmar_unit);
@@ -1262,7 +1262,7 @@ void resume_iommu(void)
 		/* restore IOMMU fault register state */
 		for (i = 0U; i < IOMMU_FAULT_REGISTER_STATE_NUM; i++) {
 			iommu_write32(dmar_unit, DMAR_FECTL_REG +
-				i * IOMMU_FAULT_REGISTER_STATE_NUM,
+				(i * IOMMU_FAULT_REGISTER_STATE_NUM),
 				iommu_fault_state[iommu_idx][i]);
 		}
 		/* enable translation */
diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c
index 77f6ce5f4..a798804f3 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -424,9 +424,9 @@ _set_vm_memmap(struct vm *vm, struct vm *target_vm,
 	base_paddr = get_hv_image_base();
 
 	if (((hpa <= base_paddr) &&
-		(hpa + memmap->length > base_paddr)) ||
+		((hpa + memmap->length) > base_paddr)) ||
 		((hpa >= base_paddr) &&
-		(hpa < base_paddr + CONFIG_RAM_SIZE))) {
+		(hpa < (base_paddr + CONFIG_RAM_SIZE)))) {
 		pr_err("%s: ERROR! overlap the HV memory region.", __func__);
 		return -1;
 	}
diff --git a/hypervisor/debug/dump.c b/hypervisor/debug/dump.c
index 7fd1dceef..a13af03a2 100644
--- a/hypervisor/debug/dump.c
+++ b/hypervisor/debug/dump.c
@@ -114,12 +114,12 @@ static void dump_guest_stack(struct vcpu *vcpu)
 	printf("\r\nGuest Stack:\r\n");
 	printf("Dump stack for vcpu %hu, from gva 0x%016llx\r\n",
 			vcpu->vcpu_id, cur_context->rsp);
-	for (i = 0U; i < DUMP_STACK_SIZE/32U; i++) {
+	for (i = 0U; i < (DUMP_STACK_SIZE/32U); i++) {
 		printf("guest_rsp(0x%llx): 0x%016llx 0x%016llx "
 				"0x%016llx 0x%016llx\r\n",
-				(cur_context->rsp+i*32),
-				tmp[i*4], tmp[i*4+1],
-				tmp[i*4+2], tmp[i*4+3]);
+				(cur_context->rsp+(i*32)),
+				tmp[i*4], tmp[(i*4)+1],
+				tmp[(i*4)+2], tmp[(i*4)+3]);
 	}
 	printf("\r\n");
 }
@@ -149,7 +149,7 @@ static void show_guest_call_trace(struct vcpu *vcpu)
 	 * try to print out call trace,here can not check if the rbp is valid
 	 * if the address is invalid, it will cause hv page fault
 	 * then halt system */
-	while ((count++ < CALL_TRACE_HIERARCHY_MAX) && (bp != 0)) {
+	while ((count < CALL_TRACE_HIERARCHY_MAX) && (bp != 0)) {
 		uint64_t parent_bp = 0UL;
 
 		err_code = 0U;
@@ -163,6 +163,7 @@ static void show_guest_call_trace(struct vcpu *vcpu)
 		printf("BP_GVA(0x%016llx) RIP=0x%016llx\r\n", bp, parent_bp);
 		/* Get previous rbp*/
 		bp = parent_bp;
+		count++;
 	}
 	printf("\r\n");
 }
@@ -186,10 +187,10 @@ static void show_host_call_trace(uint64_t rsp, uint64_t rbp, uint16_t pcpu_id)
 	uint64_t *sp = (uint64_t *)rsp;
 
 	printf("\r\nHost Stack: CPU_ID = %hu\r\n", pcpu_id);
-	for (i = 0U; i < DUMP_STACK_SIZE/32U; i++) {
+	for (i = 0U; i < (DUMP_STACK_SIZE/32U); i++) {
 		printf("addr(0x%llx) 0x%016llx 0x%016llx 0x%016llx "
-			"0x%016llx\r\n", (rsp+i*32U), sp[i*4U], sp[i*4U+1U],
-			sp[i*4U+2U], sp[i*4U+3U]);
+			"0x%016llx\r\n", (rsp+(i*32U)), sp[i*4U], sp[(i*4U)+1U],
+			sp[(i*4U)+2U], sp[(i*4U)+3U]);
 	}
 	printf("\r\n");
 
@@ -216,14 +217,15 @@ static void show_host_call_trace(uint64_t rsp, uint64_t rbp, uint16_t pcpu_id)
 	while ((rbp <=
 		(uint64_t)&per_cpu(stack, pcpu_id)[CONFIG_STACK_SIZE - 1])
 		&& (rbp >= (uint64_t)&per_cpu(stack, pcpu_id)[0])
-		&& (cb_hierarchy++ < CALL_TRACE_HIERARCHY_MAX)) {
+		&& (cb_hierarchy < CALL_TRACE_HIERARCHY_MAX)) {
 		printf("----> 0x%016llx\r\n",
 			*(uint64_t *)(rbp + sizeof(uint64_t)));
-		if (*(uint64_t *)(rbp + 2*sizeof(uint64_t))
+		if (*(uint64_t *)(rbp + (2*sizeof(uint64_t)))
 				== SP_BOTTOM_MAGIC) {
 			break;
 		}
 		rbp = *(uint64_t *)rbp;
+		cb_hierarchy++;
 	}
 	printf("\r\n");
 }
diff --git a/hypervisor/debug/logmsg.c b/hypervisor/debug/logmsg.c
index eb6c191d0..917d96039 100644
--- a/hypervisor/debug/logmsg.c
+++ b/hypervisor/debug/logmsg.c
@@ -23,7 +23,7 @@ static struct logmsg logmsg;
 static inline void alloc_earlylog_sbuf(uint16_t pcpu_id)
 {
 	uint32_t ele_size = LOG_ENTRY_SIZE;
-	uint32_t ele_num = ((HVLOG_BUF_SIZE >> 1) / phys_cpu_num
+	uint32_t ele_num = (((HVLOG_BUF_SIZE >> 1U) / phys_cpu_num)
 			- SBUF_HEAD_SIZE) / ele_size;
 
 	per_cpu(earlylog_sbuf, pcpu_id) = sbuf_allocate(ele_num, ele_size);
@@ -158,10 +158,10 @@
 		if (sbuf != NULL) {
 			msg_len = strnlen_s(buffer, LOG_MESSAGE_MAX_SIZE);
 
-			for (i = 0; i < (msg_len - 1) / LOG_ENTRY_SIZE + 1;
+			for (i = 0; i < (((msg_len - 1) / LOG_ENTRY_SIZE) + 1);
 					i++) {
 				(void)sbuf_put(sbuf, (uint8_t *)buffer +
-						i * LOG_ENTRY_SIZE);
+						(i * LOG_ENTRY_SIZE));
 			}
 		}
 	}
diff --git a/hypervisor/debug/shell_internal.c b/hypervisor/debug/shell_internal.c
index 307af0ff8..18ba5d283 100644
--- a/hypervisor/debug/shell_internal.c
+++ b/hypervisor/debug/shell_internal.c
@@ -768,8 +768,8 @@ int shell_vcpu_dumpreg(struct shell *p_shell,
 			snprintf(temp_str, MAX_STR_SIZE,
 				"= 0x%016llx 0x%016llx "
 				"0x%016llx 0x%016llx\r\n",
-				tmp[i*4UL], tmp[i*4UL+1UL],
-				tmp[i*4UL+2UL], tmp[i*4UL+3UL]);
+				tmp[i*4UL], tmp[(i*4UL)+1UL],
+				tmp[(i*4UL)+2UL], tmp[(i*4UL)+3UL]);
 			shell_puts(p_shell, temp_str);
 		}
 	}
@@ -841,18 +841,18 @@ int shell_vcpu_dumpmem(struct shell *p_shell,
 			"length %d:\r\n", vcpu_id, gva, length);
 	shell_puts(p_shell, temp_str);
 
-	for (i = 0U; i < length/32U; i++) {
+	for (i = 0U; i < (length/32U); i++) {
 		snprintf(temp_str, MAX_STR_SIZE,
 			"= 0x%016llx 0x%016llx 0x%016llx "
-			"0x%016llx\r\n", tmp[i*4], tmp[i*4+1],
-			tmp[i*4+2], tmp[i*4+3]);
+			"0x%016llx\r\n", tmp[i*4], tmp[(i*4)+1],
+			tmp[(i*4)+2], tmp[(i*4)+3]);
 		shell_puts(p_shell, temp_str);
 	}
 
 	if ((length % 32U) != 0) {
 		snprintf(temp_str, MAX_STR_SIZE,
 			"= 0x%016llx 0x%016llx 0x%016llx "
-			"0x%016llx\r\n", tmp[i*4], tmp[i*4+1],
-			tmp[i*4+2], tmp[i*4+3]);
+			"0x%016llx\r\n", tmp[i*4], tmp[(i*4)+1],
+			tmp[(i*4)+2], tmp[(i*4)+3]);
 		shell_puts(p_shell, temp_str);
 	}
 }
diff --git a/hypervisor/include/arch/x86/mmu.h b/hypervisor/include/arch/x86/mmu.h
index 90291a68d..d99021825 100644
--- a/hypervisor/include/arch/x86/mmu.h
+++ b/hypervisor/include/arch/x86/mmu.h
@@ -377,7 +377,7 @@ struct e820_entry {
  */
 static inline void *mmu_pt_for_pde(uint32_t *pd, uint32_t vaddr)
 {
-	return pd + ((vaddr >> 22U) + 1U) * 1024U;
+	return pd + (((vaddr >> 22U) + 1U) * 1024U);
 }
 
 #define CACHE_FLUSH_INVALIDATE_ALL()			\
diff --git a/hypervisor/include/arch/x86/vmx.h b/hypervisor/include/arch/x86/vmx.h
index 762b0ebad..dcd4d7b78 100644
--- a/hypervisor/include/arch/x86/vmx.h
+++ b/hypervisor/include/arch/x86/vmx.h
@@ -58,7 +58,7 @@
 #define VMX_EOI_EXIT2_HIGH		0x00002021U
 #define VMX_EOI_EXIT3_FULL		0x00002022U
 #define VMX_EOI_EXIT3_HIGH		0x00002023U
-#define VMX_EOI_EXIT(vector)	(VMX_EOI_EXIT0_FULL + ((vector) / 64U) * 2U)
+#define VMX_EOI_EXIT(vector)	(VMX_EOI_EXIT0_FULL + (((vector) / 64U) * 2U))
 #define VMX_XSS_EXITING_BITMAP_FULL	0x0000202CU
 #define VMX_XSS_EXITING_BITMAP_HIGH	0x0000202DU
 /* 64-bit read-only data fields */
diff --git a/hypervisor/include/arch/x86/vtd.h b/hypervisor/include/arch/x86/vtd.h
index 4fb35b6f3..981ba7af5 100644
--- a/hypervisor/include/arch/x86/vtd.h
+++ b/hypervisor/include/arch/x86/vtd.h
@@ -91,7 +91,7 @@ static inline uint16_t iommu_cap_fault_reg_offset(uint64_t cap)
 static inline uint16_t iommu_cap_max_fault_reg_offset(uint64_t cap)
 {
 	return (iommu_cap_fault_reg_offset(cap) +
-		iommu_cap_num_fault_regs(cap) * 16U);
+		(iommu_cap_num_fault_regs(cap) * 16U));
 }
 
 static inline uint8_t iommu_cap_zlr(uint64_t cap)
@@ -141,7 +141,7 @@ static inline uint8_t iommu_cap_afl(uint64_t cap)
 
 static inline uint32_t iommu_cap_ndoms(uint64_t cap)
 {
-	return ((1U) << (4U + 2U * ((uint8_t)cap & 0x7U)));
+	return ((1U) << (4U + (2U * ((uint8_t)cap & 0x7U))));
 }
 
 /*
diff --git a/hypervisor/include/lib/bits.h b/hypervisor/include/lib/bits.h
index 73df81aef..609ddb4c2 100644
--- a/hypervisor/include/lib/bits.h
+++ b/hypervisor/include/lib/bits.h
@@ -178,7 +178,7 @@ static inline uint16_t clz64(uint64_t value)
 #define build_bitmap_set(name, op_len, op_type, lock, nr, addr)	\
 static inline void name(uint16_t nr, volatile op_type *addr)	\
 {	\
-	nr = nr & (8U * sizeof(op_type) - 1U);	\
+	nr = nr & ((8U * sizeof(op_type)) - 1U);	\
 	asm volatile(lock "or" op_len " %1,%0"	\
 			: "+m" (*addr)	\
 			: "r" ((op_type)(1UL<<nr))	\
diff --git a/hypervisor/lib/udiv.c b/hypervisor/lib/udiv.c
 		divisor >>= 1UL;
 		mask >>= 1UL;
-	} while ((bits-- != 0UL) && (dividend != 0UL));
+	} while (((bits--) != 0UL) && (dividend != 0UL));
 
 	res->r.qword = dividend;
 	return 0;
diff --git a/hypervisor/lib/memory.c b/hypervisor/lib/memory.c
index c77ad5d8f..48fe31d7a 100644
--- a/hypervisor/lib/memory.c
+++ b/hypervisor/lib/memory.c
@@ -126,9 +126,9 @@ static void *allocate_mem(struct mem_pool *pool, unsigned int num_bytes)
 			 * memory pool
 			 */
 			memory = (char *)pool->start_addr +
-				pool->buff_size *
-				(idx * BITMAP_WORD_SIZE +
-				bit_idx);
+				(pool->buff_size *
+				((idx * BITMAP_WORD_SIZE) +
+				bit_idx));
 
 			/* Update allocation bitmaps information for
 			 * selected buffers
@@ -361,8 +361,8 @@ void *memcpy_s(void *d, size_t dmax, const void *s, size_t slen)
 		ASSERT(false);
 	}
 
-	if ((d > s && d <= s + slen - 1)
-		|| (d < s && s <= d + dmax - 1)) {
+	if (((d > s) && (d <= (s + slen - 1U)))
+		|| ((d < s) && (s <= (d + dmax - 1U)))) {
 		ASSERT(false);
 	}
diff --git a/hypervisor/lib/sprintf.c b/hypervisor/lib/sprintf.c
index 36c9ce13f..80be9b473 100644
--- a/hypervisor/lib/sprintf.c
+++ b/hypervisor/lib/sprintf.c
@@ -96,7 +96,7 @@ static const char *get_int(const char *s, int *x)
 
 	/* parse uint32_teger */
 	while ((*s >= '0') && (*s <= '9')) {
-		*x = *x * 10 + (*s - '0');
+		*x = (*x * 10) + (*s - '0');
 		s++;
 	}
 
@@ -648,7 +648,7 @@ static int charmem(int cmd, const char *s, int sz, void *hnd)
 	if (cmd == PRINT_CMD_COPY) {
 		if (sz < 0) {
 			while ((*s) != '\0') {
-				if (n < param->sz - param->wrtn) {
+				if (n < (param->sz - param->wrtn)) {
 					*p = *s;
 				}
 				p++;
@@ -658,7 +658,7 @@ static int charmem(int cmd, const char *s, int sz, void *hnd)
 		}
 		else if (sz > 0) {
 			while (((*s) != '\0') && n < sz) {
-				if (n < param->sz - param->wrtn) {
+				if (n < (param->sz - param->wrtn)) {
 					*p = *s;
 				}
 				p++;
@@ -674,7 +674,7 @@ static int charmem(int cmd, const char *s, int sz, void *hnd)
 	}
 	/* fill mode */
 	else {
-		n = (sz < param->sz - param->wrtn) ? sz : 0;
+		n = (sz < (param->sz - param->wrtn)) ? sz : 0;
 		param->wrtn += sz;
 		(void)memset(p, *s, n);
 	}