HV: treewide: Update the type of return value and parameters of atomic operations

The static analysis tool reports many type conversions at the call
sites of the atomic operations. These type conversions violate MISRA C.

To keep a uniform naming convention, rename the atomic operation
functions:
atomic_set_int/long --> atomic_set32/64;
atomic_clear_int/long --> atomic_clear32/64;
atomic_load --> atomic_load32;
atomic_store --> atomic_store32;
atomic_swap --> atomic_swap32;
atomic_readandclear --> atomic_readandclear32;
atomic_inc --> atomic_inc32;
atomic_dec --> atomic_dec32;
atomic_cmpxchg --> atomic_cmpxchg32;
atomic_xadd --> atomic_xadd32.
Update the parameter and return value types of atomic_load32/64,
atomic_store32/64, atomic_swap32/64 and atomic_cmpxchg32/64.
Update the related variables and callers.
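
For illustration, a minimal before/after sketch of a typical caller update
(the variable name is hypothetical; the declarations come from the renamed
atomic.h):

    /* Before: old names operating on plain int */
    static int intr_pending;
    int pending = atomic_load(&intr_pending);
    atomic_store(&intr_pending, 0);

    /* After: renamed functions operating on uint32_t */
    static uint32_t intr_pending;
    uint32_t pending = atomic_load32(&intr_pending);
    atomic_store32(&intr_pending, 0U);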

Note: the return value and parameters of atomic_xadd32/64 keep
signed int/long, since callers pass negative values to
atomic_xadd32/64.
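
The signed parameter matters because atomic_sub_return (see the last hunk
below) negates its argument before handing it to atomic_xadd32; a short
sketch of what a caller relies on (the counter is illustrative):

    int refcnt = 5;
    int old = atomic_xadd32(&refcnt, -1);  /* returns 5, refcnt becomes 4 */
    /* atomic_sub_return(&refcnt, 1) expands to atomic_xadd32(&refcnt, -1) - 1 */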

V1-->V2:
	Add comments for atomic_set32/64 and atomic_clear32/64 to
	distinguish them from bitmap_set/clear.
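
Roughly, the distinction the new comments draw: atomic_set32/64 OR a whole
mask into the destination, while the bitmap helpers work on a single bit
position (the bitmap_set/clear signatures are not shown in this diff, so the
call below is only indicative):

    uint32_t flags = 0U;
    atomic_set32(&flags, 0x5U);     /* ORs the mask: bits 0 and 2 set at once */
    /* bitmap_set(2U, &flags);         by contrast, would set only bit 2 */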

Signed-off-by: Xiangyang Wu <xiangyang.wu@intel.com>
Reviewed-by: Junjie.Mao <junjie.mao@intel.com>
Author: Xiangyang Wu
Date: 2018-07-18 13:36:41 +08:00
Committed by: lijinxia
Parent: 3aa7d59497
Commit: d28fff2b06
12 changed files with 79 additions and 60 deletions


@@ -260,8 +260,8 @@ struct vcpu {
uint64_t pending_pre_work; /* any pre work pending? */
bool launched; /* Whether the vcpu is launched on target pcpu */
uint32_t paused_cnt; /* how many times vcpu is paused */
int running; /* vcpu is picked up and run? */
int ioreq_pending; /* ioreq is ongoing or not? */
uint32_t running; /* vcpu is picked up and run? */
uint32_t ioreq_pending; /* ioreq is ongoing or not? */
struct vhm_request req; /* used by io/ept emulation */
struct mem_io mmio; /* used by io/ept emulation */
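
With running and ioreq_pending now uint32_t, accesses through the 32-bit
atomics type-check without casts; a hypothetical helper (the surrounding
usage is assumed, not part of this hunk):

    static inline bool vcpu_ioreq_is_pending(struct vcpu *vcpu)
    {
        return (atomic_load32(&vcpu->ioreq_pending) != 0U);
    }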


@@ -7,7 +7,7 @@
#ifndef PTDEV_H
#define PTDEV_H
#define ACTIVE_FLAG 0x1 /* any non zero should be okay */
#define ACTIVE_FLAG 0x1U /* any non zero should be okay */
enum ptdev_intr_type {
PTDEV_INTR_MSI,
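
Making ACTIVE_FLAG unsigned keeps it compatible with the uint32_t-based
atomic helpers; a sketch with an illustrative flags variable:

    uint32_t flags = 0U;
    atomic_set32(&flags, ACTIVE_FLAG);        /* mark the entry active */
    if ((atomic_load32(&flags) & ACTIVE_FLAG) != 0U) {
        atomic_clear32(&flags, ACTIVE_FLAG);  /* deactivate it again */
    }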


@@ -42,8 +42,8 @@ static inline type name(const volatile type *ptr) \
: "cc", "memory"); \
return ret; \
}
build_atomic_load(atomic_load, "l", int, p)
build_atomic_load(atomic_load64, "q", long, p)
build_atomic_load(atomic_load32, "l", uint32_t, p)
build_atomic_load(atomic_load64, "q", uint64_t, p)
#define build_atomic_store(name, size, type, ptr, v) \
static inline void name(volatile type *ptr, type v) \
@@ -54,8 +54,8 @@ static inline void name(volatile type *ptr, type v) \
: "cc", "memory"); \
}
build_atomic_store(atomic_store16, "w", uint16_t, p, v)
build_atomic_store(atomic_store, "l", int, p, v)
build_atomic_store(atomic_store64, "q", long, p, v)
build_atomic_store(atomic_store32, "l", uint32_t, p, v)
build_atomic_store(atomic_store64, "q", uint64_t, p, v)
#define build_atomic_inc(name, size, type, ptr) \
static inline void name(type *ptr) \
@@ -64,8 +64,8 @@ static inline void name(type *ptr) \
: "=m" (*ptr) \
: "m" (*ptr)); \
}
build_atomic_inc(atomic_inc, "l", int, p)
build_atomic_inc(atomic_inc64, "q", long, p)
build_atomic_inc(atomic_inc32, "l", uint32_t, p)
build_atomic_inc(atomic_inc64, "q", uint64_t, p)
#define build_atomic_dec(name, size, type, ptr) \
static inline void name(type *ptr) \
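
A minimal sketch of the renamed increment/decrement pair on a uint32_t
counter (the counter is illustrative):

    static uint32_t vcpu_cnt;
    atomic_inc32(&vcpu_cnt);    /* locked incl: add one atomically */
    atomic_dec32(&vcpu_cnt);    /* locked decl: subtract one atomically */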
@@ -75,13 +75,18 @@ static inline void name(type *ptr) \
: "m" (*ptr)); \
}
build_atomic_dec(atomic_dec16, "w", uint16_t, p)
build_atomic_dec(atomic_dec, "l", int, p)
build_atomic_dec(atomic_dec64, "q", long, p)
build_atomic_dec(atomic_dec32, "l", uint32_t, p)
build_atomic_dec(atomic_dec64, "q", uint64_t, p)
/*
* #define atomic_set_int(P, V) (*(unsigned int *)(P) |= (V))
/**
* #define atomic_set32(P, V) (*(uint32_t *)(P) |= (V))
*
* Parameters:
* uint32_t* p A pointer to memory area that stores source
* value and setting result;
* uint32_t v The value needs to be set.
*/
static inline void atomic_set_int(unsigned int *p, unsigned int v)
static inline void atomic_set32(uint32_t *p, uint32_t v)
{
__asm __volatile(BUS_LOCK "orl %1,%0"
: "+m" (*p)
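
A short usage sketch matching the parameter documentation above (the mask
variable is illustrative):

    uint32_t pcpu_mask = 0U;
    atomic_set32(&pcpu_mask, (1U << 2));    /* locked OR: set bit 2 */
    atomic_clear32(&pcpu_mask, (1U << 2));  /* locked AND-NOT: clear bit 2 */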
@@ -90,9 +95,13 @@ static inline void atomic_set_int(unsigned int *p, unsigned int v)
}
/*
* #define atomic_clear_int(P, V) (*(unsigned int *)(P) &= ~(V))
* #define atomic_clear32(P, V) (*(uint32_t *)(P) &= ~(V))
* Parameters:
* uint32_t* p A pointer to memory area that stores source
* value and clearing result;
* uint32_t v The value needs to be cleared.
*/
static inline void atomic_clear_int(unsigned int *p, unsigned int v)
static inline void atomic_clear32(uint32_t *p, uint32_t v)
{
__asm __volatile(BUS_LOCK "andl %1,%0"
: "+m" (*p)
@@ -101,9 +110,14 @@ static inline void atomic_clear_int(unsigned int *p, unsigned int v)
}
/*
* #define atomic_set_long(P, V) (*(unsigned long *)(P) |= (V))
* #define atomic_set64(P, V) (*(uint64_t *)(P) |= (V))
*
* Parameters:
* uint64_t* p A pointer to memory area that stores source
* value and setting result;
* uint64_t v The value needs to be set.
*/
static inline void atomic_set_long(unsigned long *p, unsigned long v)
static inline void atomic_set64(uint64_t *p, uint64_t v)
{
__asm __volatile(BUS_LOCK "orq %1,%0"
: "+m" (*p)
@@ -112,9 +126,14 @@ static inline void atomic_set_long(unsigned long *p, unsigned long v)
}
/*
* #define atomic_clear_long(P, V) (*(u_long *)(P) &= ~(V))
* #define atomic_clear64(P, V) (*(uint64_t *)(P) &= ~(V))
*
* Parameters:
* uint64_t* p A pointer to memory area that stores source
* value and clearing result;
* uint64_t v The value needs to be cleared.
*/
static inline void atomic_clear_long(unsigned long *p, unsigned long v)
static inline void atomic_clear64(uint64_t *p, uint64_t v)
{
__asm __volatile(BUS_LOCK "andq %1,%0"
: "+m" (*p)
@@ -131,20 +150,20 @@ static inline type name(type *ptr, type v) \
: "cc", "memory"); \
return v; \
}
build_atomic_swap(atomic_swap, "l", int, p, v)
build_atomic_swap(atomic_swap64, "q", long, p, v)
build_atomic_swap(atomic_swap32, "l", uint32_t, p, v)
build_atomic_swap(atomic_swap64, "q", uint64_t, p, v)
/*
* #define atomic_readandclear(P) \
* (return (*(int *)(P)); *(int *)(P) = 0;)
* #define atomic_readandclear32(P) \
* (return (*(uint32_t *)(P)); *(uint32_t *)(P) = 0U;)
*/
#define atomic_readandclear(p) atomic_swap(p, 0)
#define atomic_readandclear32(p) atomic_swap32(p, 0U)
/*
* #define atomic_readandclear64(P) \
* (return (*(long *)(P)); *(long *)(P) = 0;)
* (return (*(uint64_t *)(P)); *(uint64_t *)(P) = 0UL;)
*/
#define atomic_readandclear64(p) atomic_swap64(p, 0)
#define atomic_readandclear64(p) atomic_swap64(p, 0UL)
#define build_atomic_cmpxchg(name, size, type, ptr, old, new) \
static inline type name(volatile type *ptr, \
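
Both of the renamed read-modify-write helpers above return the previous
contents of the location; a sketch (the variable is illustrative):

    static uint32_t pending_bits;
    uint32_t old = atomic_swap32(&pending_bits, 0x3U);      /* old value returned, 0x3 stored */
    uint32_t taken = atomic_readandclear32(&pending_bits);  /* fetch current value, leave 0 */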
@@ -157,8 +176,8 @@ static inline type name(volatile type *ptr, \
: "memory"); \
return ret; \
}
build_atomic_cmpxchg(atomic_cmpxchg, "l", int, p, old, new)
build_atomic_cmpxchg(atomic_cmpxchg64, "q", long, p, old, new)
build_atomic_cmpxchg(atomic_cmpxchg32, "l", uint32_t, p, old, new)
build_atomic_cmpxchg(atomic_cmpxchg64, "q", uint64_t, p, old, new)
#define build_atomic_xadd(name, size, type, ptr, v) \
static inline type name(type *ptr, type v) \
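
atomic_cmpxchg32 returns the value observed in memory, so a conventional
compare-and-swap retry loop looks like this (sketch only, names illustrative):

    static uint32_t lock_state;
    uint32_t expected, desired;

    do {
        expected = atomic_load32(&lock_state);
        desired = expected | 0x1U;
    } while (atomic_cmpxchg32(&lock_state, expected, desired) != expected);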
@@ -170,11 +189,11 @@ static inline type name(type *ptr, type v) \
return v; \
}
build_atomic_xadd(atomic_xadd16, "w", uint16_t, p, v)
build_atomic_xadd(atomic_xadd, "l", int, p, v)
build_atomic_xadd(atomic_xadd32, "l", int, p, v)
build_atomic_xadd(atomic_xadd64, "q", long, p, v)
#define atomic_add_return(p, v) ( atomic_xadd(p, v) + v )
#define atomic_sub_return(p, v) ( atomic_xadd(p, -v) - v )
#define atomic_add_return(p, v) ( atomic_xadd32(p, v) + v )
#define atomic_sub_return(p, v) ( atomic_xadd32(p, -v) - v )
#define atomic_inc_return(v) atomic_add_return((v), 1)
#define atomic_dec_return(v) atomic_sub_return((v), 1)
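
Since these wrappers now expand to atomic_xadd32, they keep plain int
semantics; a short sketch of the expansion (the counter is illustrative):

    int refcnt = 0;
    int now = atomic_inc_return(&refcnt);  /* atomic_xadd32(&refcnt, 1) + 1 == 1 */
    now = atomic_dec_return(&refcnt);      /* atomic_xadd32(&refcnt, -1) - 1 == 0 */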