HV:treewide:Update the types of return values and parameters of atomic operations

The static analysis tool reports many type conversions
in atomic operation invocations. These type conversions
violate MISRA C.

To keep a uniform naming convention, rename the atomic
operation functions:
atomic_set_int/long --> atomic_set32/64;
atomic_clear_int/long --> atomic_clear32/64;
atomic_load --> atomic_load32;
atomic_store --> atomic_store32;
atomic_swap --> atomic_swap32;
atomic_readandclear --> atomic_readandclear32;
atomic_inc --> atomic_inc32;
atomic_dec --> atomic_dec32;
atomic_cmpxchg --> atomic_cmpxchg32;
atomic_xadd --> atomic_xadd32.
Update the parameter and return types of atomic_load32/64,
atomic_store32/64, atomic_swap32/64 and atomic_cmpxchg32/64.
Update the related variables and callers.

Note: the return value and parameter types of atomic_xadd32/64
still keep signed int/long, since callers pass negative values
to atomic_xadd32/64 (see the sketch below).
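
A minimal sketch of that design choice (not part of this commit; atomic_xadd32_stub and main are hypothetical stand-ins, while the real primitive is generated by build_atomic_xadd in the atomic.h hunks at the end of this diff):

	#include <stdio.h>

	/* plain-C stand-in for atomic_xadd32: return the old value, then add v */
	static inline int atomic_xadd32_stub(int *p, int v)
	{
		int old = *p;
		*p += v;
		return old;
	}

	/* mirrors the atomic_add_return/atomic_sub_return macros in atomic.h */
	#define atomic_add_return(p, v)	(atomic_xadd32_stub((p), (v)) + (v))
	#define atomic_sub_return(p, v)	(atomic_xadd32_stub((p), -(v)) - (v))

	int main(void)
	{
		int refcnt = 3;

		/* atomic_sub_return negates v, so the xadd parameter must stay signed */
		printf("%d\n", atomic_add_return(&refcnt, 1));	/* prints 4 */
		printf("%d\n", atomic_sub_return(&refcnt, 1));	/* prints 3 */
		return 0;
	}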

V1-->V2:
	Add comments for atomic_set/clear to differ from
	bitmap_set/clear.
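
A hedged illustration of that distinction (simplified, non-atomic stand-ins; the real bitmap_set signature may differ):

	#include <stdint.h>

	/* atomic_set32 ORs a mask *value* into the word, per the
	 * "*(P) |= (V)" comment added in atomic.h */
	static inline void atomic_set32_stub(uint32_t *p, uint32_t mask)
	{
		*p |= mask;		/* e.g. mask = ACTIVE_FLAG (0x1U) */
	}

	/* a bitmap_set-style helper takes a bit *index* instead */
	static inline void bitmap_set_stub(uint32_t nr, uint64_t *addr)
	{
		*addr |= (1UL << nr);	/* sets bit 'nr', not the value 'nr' */
	}

	int main(void)
	{
		uint32_t flags = 0U;
		uint64_t bits = 0UL;

		atomic_set32_stub(&flags, 0x1U);	/* flags == 0x1 */
		bitmap_set_stub(1U, &bits);		/* bits  == 0x2 */
		return 0;
	}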

Signed-off-by: Xiangyang Wu <xiangyang.wu@intel.com>
Reviewed-by: Junjie.Mao <junjie.mao@intel.com>
Xiangyang Wu 2018-07-18 13:36:41 +08:00 committed by lijinxia
parent 3aa7d59497
commit d28fff2b06
12 changed files with 79 additions and 60 deletions

View File

@@ -49,7 +49,7 @@ is_entry_invalid(struct ptdev_remapping_info *entry)
static inline bool
is_entry_active(struct ptdev_remapping_info *entry)
{
return atomic_load((int *)&entry->active) == ACTIVE_FLAG;
return atomic_load32(&entry->active) == ACTIVE_FLAG;
}
/* require ptdev_lock protect */

View File

@@ -66,8 +66,8 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
vcpu->vcpu_id = atomic_xadd16(&vm->hw.created_vcpus, 1U);
/* vm->hw.vcpu_array[vcpu->vcpu_id] = vcpu; */
atomic_store64(
(long *)&vm->hw.vcpu_array[vcpu->vcpu_id],
(long)vcpu);
(uint64_t *)&vm->hw.vcpu_array[vcpu->vcpu_id],
(uint64_t)vcpu);
ASSERT(vcpu->vcpu_id < vm->hw.num_vcpus,
"Allocated vcpu_id is out of range!");
@@ -243,8 +243,8 @@ void destroy_vcpu(struct vcpu *vcpu)
/* vcpu->vm->hw.vcpu_array[vcpu->vcpu_id] = NULL; */
atomic_store64(
(long *)&vcpu->vm->hw.vcpu_array[vcpu->vcpu_id],
(long)NULL);
(uint64_t *)&vcpu->vm->hw.vcpu_array[vcpu->vcpu_id],
(uint64_t)NULL);
atomic_dec16(&vcpu->vm->hw.created_vcpus);
@@ -300,13 +300,13 @@ void pause_vcpu(struct vcpu *vcpu, enum vcpu_state new_state)
vcpu->prev_state = vcpu->state;
vcpu->state = new_state;
if (atomic_load(&vcpu->running) == 1) {
if (atomic_load32(&vcpu->running) == 1U) {
remove_vcpu_from_runqueue(vcpu);
make_reschedule_request(vcpu);
release_schedule_lock(vcpu->pcpu_id);
if (vcpu->pcpu_id != pcpu_id) {
while (atomic_load(&vcpu->running) == 1)
while (atomic_load32(&vcpu->running) == 1U)
__asm__ __volatile("pause" ::: "memory");
}
} else {

View File

@@ -568,7 +568,7 @@ vlapic_get_lvt(struct vlapic *vlapic, uint32_t offset)
uint32_t idx, val;
idx = lvt_off_to_idx(offset);
val = atomic_load((int *)&vlapic->lvt_last[idx]);
val = atomic_load32(&vlapic->lvt_last[idx]);
return val;
}
@@ -636,7 +636,7 @@ vlapic_lvt_write_handler(struct vlapic *vlapic, uint32_t offset)
}
*lvtptr = val;
atomic_store((int *)&vlapic->lvt_last[idx], val);
atomic_store32(&vlapic->lvt_last[idx], val);
}
static void
@@ -1198,7 +1198,7 @@ vlapic_pending_intr(struct vlapic *vlapic, uint32_t *vecptr)
/* i ranges effectively from 7 to 0 */
for (i = 8U; i > 0U; ) {
i--;
val = atomic_load((int *)&irrptr[i].val);
val = atomic_load32(&irrptr[i].val);
bitpos = (uint32_t)fls32(val);
if (bitpos != INVALID_BIT_INDEX) {
vector = i * 32U + bitpos;
@@ -1233,7 +1233,7 @@ vlapic_intr_accepted(struct vlapic *vlapic, uint32_t vector)
idx = vector / 32U;
irrptr = &lapic->irr[0];
atomic_clear_int(&irrptr[idx].val, 1U << (vector % 32U));
atomic_clear32(&irrptr[idx].val, 1U << (vector % 32U));
vlapic_dump_irr(vlapic, "vlapic_intr_accepted");
isrptr = &lapic->isr[0];
@@ -2183,8 +2183,8 @@ apicv_set_intr_ready(struct vlapic *vlapic, uint32_t vector, __unused bool level
idx = vector / 64U;
mask = 1UL << (vector % 64U);
atomic_set_long(&pir_desc->pir[idx], mask);
notify = (atomic_cmpxchg64((long *)&pir_desc->pending, 0, 1) == 0) ? 1 : 0;
atomic_set64(&pir_desc->pir[idx], mask);
notify = (atomic_cmpxchg64(&pir_desc->pending, 0UL, 1UL) == 0UL) ? 1 : 0;
return notify;
}
@@ -2198,7 +2198,7 @@ apicv_pending_intr(struct vlapic *vlapic, __unused uint32_t *vecptr)
pir_desc = vlapic->pir_desc;
pending = atomic_load64((long *)&pir_desc->pending);
pending = atomic_load64(&pir_desc->pending);
if (pending == 0U) {
return 0;
}
@@ -2308,7 +2308,7 @@ apicv_inject_pir(struct vlapic *vlapic)
struct lapic_reg *irr = NULL;
pir_desc = vlapic->pir_desc;
if (atomic_cmpxchg64((long *)(&pir_desc->pending), 1, 0) != 1) {
if (atomic_cmpxchg64(&pir_desc->pending, 1UL, 0UL) != 1UL) {
return;
}
@@ -2317,7 +2317,7 @@ apicv_inject_pir(struct vlapic *vlapic)
irr = &lapic->irr[0];
for (i = 0U; i < 4U; i++) {
val = atomic_readandclear64((long *)&pir_desc->pir[i]);
val = atomic_readandclear64(&pir_desc->pir[i]);
if (val != 0UL) {
irr[i * 2U].val |= (uint32_t)val;
irr[(i * 2U) + 1U].val |= (uint32_t)(val >> 32);

View File

@@ -347,7 +347,7 @@ static void complete_request(struct vcpu *vcpu)
req_buf = (union vhm_request_buffer *)
vcpu->vm->sw.io_shared_page;
req_buf->req_queue[vcpu->vcpu_id].valid = false;
atomic_store(&vcpu->ioreq_pending, 0);
atomic_store32(&vcpu->ioreq_pending, 0U);
return;
}

View File

@@ -79,7 +79,7 @@ int32_t acrn_insert_request_wait(struct vcpu *vcpu, struct vhm_request *req)
* TODO: when pause_vcpu changed to switch vcpu out directlly, we
* should fix the race issue between req.valid = true and vcpu pause
*/
atomic_store(&vcpu->ioreq_pending, 1);
atomic_store32(&vcpu->ioreq_pending, 1U);
pause_vcpu(vcpu, VCPU_PAUSED);
/* Must clear the signal before we mark req valid

View File

@@ -75,7 +75,7 @@ alloc_entry(struct vm *vm, enum ptdev_intr_type type)
INIT_LIST_HEAD(&entry->softirq_node);
INIT_LIST_HEAD(&entry->entry_node);
atomic_clear_int(&entry->active, ACTIVE_FLAG);
atomic_clear32(&entry->active, ACTIVE_FLAG);
list_add(&entry->entry_node, &ptdev_list);
return entry;
@@ -140,7 +140,7 @@ ptdev_activate_entry(struct ptdev_remapping_info *entry, uint32_t phys_irq,
ASSERT(node != NULL, "dev register failed");
entry->node = node;
atomic_set_int(&entry->active, ACTIVE_FLAG);
atomic_set32(&entry->active, ACTIVE_FLAG);
}
void
@@ -148,7 +148,7 @@ ptdev_deactivate_entry(struct ptdev_remapping_info *entry)
{
spinlock_rflags;
atomic_clear_int(&entry->active, ACTIVE_FLAG);
atomic_clear32(&entry->active, ACTIVE_FLAG);
unregister_handler_common(entry->node);
entry->node = NULL;

View File

@@ -113,7 +113,7 @@ static void context_switch_out(struct vcpu *vcpu)
/* cancel event(int, gp, nmi and exception) injection */
cancel_event_injection(vcpu);
atomic_store(&vcpu->running, 0);
atomic_store32(&vcpu->running, 0U);
/* do prev vcpu context switch out */
/* For now, we don't need to invalid ept.
* But if we have more than one vcpu on one pcpu,
@@ -131,7 +131,7 @@ static void context_switch_in(struct vcpu *vcpu)
return;
}
atomic_store(&vcpu->running, 1);
atomic_store32(&vcpu->running, 1U);
/* FIXME:
* Now, we don't need to load new vcpu VMCS because
* we only do switch between vcpu loop and idle loop.

View File

@@ -132,7 +132,7 @@ struct tgt_uart {
uint64_t base_address;
uint32_t clock_frequency;
uint32_t buffer_size;
int open_count;
uint32_t open_count;
/* Target specific function pointers. */
int (*init)(struct tgt_uart *tgt_uart);

View File

@@ -130,10 +130,10 @@ static int uart16550_init(struct tgt_uart *tgt_uart)
status = -ENODEV;
} else {
if (strcmp(tgt_uart->uart_id, "STDIO") == 0) {
atomic_store(&tgt_uart->open_count, 0);
atomic_store32(&tgt_uart->open_count, 0U);
} else {
/* set open count to 1 to prevent open */
atomic_store(&tgt_uart->open_count, 1);
atomic_store32(&tgt_uart->open_count, 1U);
status = -EINVAL;
}
}
@@ -148,7 +148,7 @@ static int uart16550_open(struct tgt_uart *tgt_uart,
int status = 0;
if (strcmp(tgt_uart->uart_id, "STDIO") == 0) {
if (atomic_cmpxchg(&tgt_uart->open_count, 0, 1) != 0) {
if (atomic_cmpxchg32(&tgt_uart->open_count, 0U, 1U) != 0U) {
return -EBUSY;
}
@@ -237,7 +237,7 @@ static uint32_t uart16550_get_rx_err(uint32_t rx_data)
static void uart16550_close(struct tgt_uart *tgt_uart)
{
if (tgt_uart != NULL) {
if (atomic_cmpxchg(&tgt_uart->open_count, 1, 0) == 1) {
if (atomic_cmpxchg32(&tgt_uart->open_count, 1U, 0U) == 1U) {
/* TODO: Add logic to disable the UART */
}
}

View File

@@ -260,8 +260,8 @@ struct vcpu {
uint64_t pending_pre_work; /* any pre work pending? */
bool launched; /* Whether the vcpu is launched on target pcpu */
uint32_t paused_cnt; /* how many times vcpu is paused */
int running; /* vcpu is picked up and run? */
int ioreq_pending; /* ioreq is ongoing or not? */
uint32_t running; /* vcpu is picked up and run? */
uint32_t ioreq_pending; /* ioreq is ongoing or not? */
struct vhm_request req; /* used by io/ept emulation */
struct mem_io mmio; /* used by io/ept emulation */

View File

@@ -7,7 +7,7 @@
#ifndef PTDEV_H
#define PTDEV_H
#define ACTIVE_FLAG 0x1 /* any non zero should be okay */
#define ACTIVE_FLAG 0x1U /* any non zero should be okay */
enum ptdev_intr_type {
PTDEV_INTR_MSI,

View File

@@ -42,8 +42,8 @@ static inline type name(const volatile type *ptr) \
: "cc", "memory"); \
return ret; \
}
build_atomic_load(atomic_load, "l", int, p)
build_atomic_load(atomic_load64, "q", long, p)
build_atomic_load(atomic_load32, "l", uint32_t, p)
build_atomic_load(atomic_load64, "q", uint64_t, p)
#define build_atomic_store(name, size, type, ptr, v) \
static inline void name(volatile type *ptr, type v) \
@@ -54,8 +54,8 @@ static inline void name(volatile type *ptr, type v) \
: "cc", "memory"); \
}
build_atomic_store(atomic_store16, "w", uint16_t, p, v)
build_atomic_store(atomic_store, "l", int, p, v)
build_atomic_store(atomic_store64, "q", long, p, v)
build_atomic_store(atomic_store32, "l", uint32_t, p, v)
build_atomic_store(atomic_store64, "q", uint64_t, p, v)
#define build_atomic_inc(name, size, type, ptr) \
static inline void name(type *ptr) \
@@ -64,8 +64,8 @@ static inline void name(type *ptr) \
: "=m" (*ptr) \
: "m" (*ptr)); \
}
build_atomic_inc(atomic_inc, "l", int, p)
build_atomic_inc(atomic_inc64, "q", long, p)
build_atomic_inc(atomic_inc32, "l", uint32_t, p)
build_atomic_inc(atomic_inc64, "q", uint64_t, p)
#define build_atomic_dec(name, size, type, ptr) \
static inline void name(type *ptr) \
@@ -75,13 +75,18 @@ static inline void name(type *ptr) \
: "m" (*ptr)); \
}
build_atomic_dec(atomic_dec16, "w", uint16_t, p)
build_atomic_dec(atomic_dec, "l", int, p)
build_atomic_dec(atomic_dec64, "q", long, p)
build_atomic_dec(atomic_dec32, "l", uint32_t, p)
build_atomic_dec(atomic_dec64, "q", uint64_t, p)
/*
* #define atomic_set_int(P, V) (*(unsigned int *)(P) |= (V))
/**
* #define atomic_set32(P, V) (*(unsigned int *)(P) |= (V))
*
* Parameters:
* uint32_t* p A pointer to memory area that stores source
* value and setting result;
* uint32_t v The value needs to be set.
*/
static inline void atomic_set_int(unsigned int *p, unsigned int v)
static inline void atomic_set32(uint32_t *p, uint32_t v)
{
__asm __volatile(BUS_LOCK "orl %1,%0"
: "+m" (*p)
@@ -90,9 +95,13 @@ static inline void atomic_set_int(unsigned int *p, unsigned int v)
}
/*
* #define atomic_clear_int(P, V) (*(unsigned int *)(P) &= ~(V))
* #define atomic_clear32(P, V) (*(uint32_t *)(P) &= ~(V))
* Parameters:
* uint32_t* p A pointer to memory area that stores source
* value and clearing result;
* uint32_t v The value needs to be cleared.
*/
static inline void atomic_clear_int(unsigned int *p, unsigned int v)
static inline void atomic_clear32(uint32_t *p, uint32_t v)
{
__asm __volatile(BUS_LOCK "andl %1,%0"
: "+m" (*p)
@@ -101,9 +110,14 @@ static inline void atomic_clear_int(unsigned int *p, unsigned int v)
}
/*
* #define atomic_set_long(P, V) (*(unsigned long *)(P) |= (V))
* #define atomic_set64(P, V) (*(uint64_t *)(P) |= (V))
*
* Parameters:
* uint32_t* p A pointer to memory area that stores source
* value and setting result;
* uint32_t v The value needs to be set.
*/
static inline void atomic_set_long(unsigned long *p, unsigned long v)
static inline void atomic_set64(uint64_t *p, uint64_t v)
{
__asm __volatile(BUS_LOCK "orq %1,%0"
: "+m" (*p)
@@ -112,9 +126,14 @@ static inline void atomic_set_long(unsigned long *p, unsigned long v)
}
/*
* #define atomic_clear_long(P, V) (*(u_long *)(P) &= ~(V))
* #define atomic_clear64(P, V) (*(uint64_t *)(P) &= ~(V))
*
* Parameters:
* uint32_t* p A pointer to memory area that stores source
* value and clearing result;
* uint32_t v The value needs to be cleared.
*/
static inline void atomic_clear_long(unsigned long *p, unsigned long v)
static inline void atomic_clear64(uint64_t *p, uint64_t v)
{
__asm __volatile(BUS_LOCK "andq %1,%0"
: "+m" (*p)
@@ -131,20 +150,20 @@ static inline type name(type *ptr, type v) \
: "cc", "memory"); \
return v; \
}
build_atomic_swap(atomic_swap, "l", int, p, v)
build_atomic_swap(atomic_swap64, "q", long, p, v)
build_atomic_swap(atomic_swap32, "l", uint32_t, p, v)
build_atomic_swap(atomic_swap64, "q", uint64_t, p, v)
/*
* #define atomic_readandclear(P) \
* (return (*(int *)(P)); *(int *)(P) = 0;)
* #define atomic_readandclear32(P) \
* (return (*(uint32_t *)(P)); *(uint32_t *)(P) = 0U;)
*/
#define atomic_readandclear(p) atomic_swap(p, 0)
#define atomic_readandclear32(p) atomic_swap32(p, 0U)
/*
* #define atomic_readandclear64(P) \
* (return (*(long *)(P)); *(long *)(P) = 0;)
* (return (*(uint64_t *)(P)); *(uint64_t *)(P) = 0UL;)
*/
#define atomic_readandclear64(p) atomic_swap64(p, 0)
#define atomic_readandclear64(p) atomic_swap64(p, 0UL)
#define build_atomic_cmpxchg(name, size, type, ptr, old, new) \
static inline type name(volatile type *ptr, \
@@ -157,8 +176,8 @@ static inline type name(volatile type *ptr, \
: "memory"); \
return ret; \
}
build_atomic_cmpxchg(atomic_cmpxchg, "l", int, p, old, new)
build_atomic_cmpxchg(atomic_cmpxchg64, "q", long, p, old, new)
build_atomic_cmpxchg(atomic_cmpxchg32, "l", uint32_t, p, old, new)
build_atomic_cmpxchg(atomic_cmpxchg64, "q", uint64_t, p, old, new)
#define build_atomic_xadd(name, size, type, ptr, v) \
static inline type name(type *ptr, type v) \
@@ -170,11 +189,11 @@ static inline type name(type *ptr, type v) \
return v; \
}
build_atomic_xadd(atomic_xadd16, "w", uint16_t, p, v)
build_atomic_xadd(atomic_xadd, "l", int, p, v)
build_atomic_xadd(atomic_xadd32, "l", int, p, v)
build_atomic_xadd(atomic_xadd64, "q", long, p, v)
#define atomic_add_return(p, v) ( atomic_xadd(p, v) + v )
#define atomic_sub_return(p, v) ( atomic_xadd(p, -v) - v )
#define atomic_add_return(p, v) ( atomic_xadd32(p, v) + v )
#define atomic_sub_return(p, v) ( atomic_xadd32(p, -v) - v )
#define atomic_inc_return(v) atomic_add_return((v), 1)
#define atomic_dec_return(v) atomic_sub_return((v), 1)