hv: refine atomic_xadd

1. rename atomic_xadd_int to atomic_xadd, add atomic_xadd64.
2. add atomic_add/sub64_return, atomic_inc/dec64_return.

Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Author:    Li, Fei1 <fei1.li@intel.com>
Date:      2018-05-16 16:12:22 +08:00
Committer: lijinxia
Parent:    bd3f3b00f7
Commit:    fadaf14a94

4 changed files with 23 additions and 25 deletions

@@ -80,7 +80,7 @@ int create_vcpu(int cpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
 	 * vcpu->vcpu_id = vm->hw.created_vcpus;
 	 * vm->hw.created_vcpus++;
 	 */
-	vcpu->vcpu_id = atomic_xadd_int(&vm->hw.created_vcpus, 1);
+	vcpu->vcpu_id = atomic_xadd(&vm->hw.created_vcpus, 1);
 	/* vm->hw.vcpu_array[vcpu->vcpu_id] = vcpu; */
 	atomic_store_rel_64(
 		(unsigned long *)&vm->hw.vcpu_array[vcpu->vcpu_id],
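
Note: atomic_xadd keeps fetch-and-add semantics, returning the counter's
value *before* the addition, which is what lets concurrent create_vcpu()
calls each claim a unique vcpu_id. A minimal standalone sketch of that
behaviour (the atomic_xadd body below uses a GCC builtin as a stand-in
for the lock-prefixed xadd in this commit; it is an illustration, not the
hypervisor code):

#include <stdio.h>

/* Stand-in for the hypervisor's atomic_xadd(): atomically adds v to *p
 * and returns the value *p held before the addition. */
static inline int atomic_xadd(int *p, int v)
{
	return __sync_fetch_and_add(p, v);
}

int main(void)
{
	int created_vcpus = 0;
	int id0 = atomic_xadd(&created_vcpus, 1);	/* returns 0 */
	int id1 = atomic_xadd(&created_vcpus, 1);	/* returns 1 */

	printf("id0=%d id1=%d created=%d\n", id0, id1, created_vcpus);
	return 0;
}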

@@ -52,7 +52,7 @@ DEFINE_CPU_DATA(struct shared_buf *, earlylog_sbuf);
 struct logmsg {
 	uint32_t flags;
-	unsigned int seq;
+	int seq;
 	spinlock_t lock;
 };

@@ -47,7 +47,7 @@ struct vm_attr {
 struct vm_hw_info {
 	int num_vcpus;		/* Number of total virtual cores */
 	int exp_num_vcpus;	/* Number of real expected virtual cores */
-	uint32_t created_vcpus;	/* Number of created vcpus */
+	int created_vcpus;	/* Number of created vcpus */
 	struct vcpu **vcpu_array;	/* vcpu array of this VM */
 	uint64_t gpa_lowtop;	/* top lowmem gpa of this VM */
 };
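
Note: this type change (and the logmsg seq change above) appears to line
the fields up with the new atomic_xadd(int *, int) prototype; with the
old uint32_t type, &vm->hw.created_vcpus would no longer match the
parameter type. A hypothetical compile-time illustration (demo() and the
builtin-based stub body are mine, not part of the commit):

#include <stdint.h>

static inline int atomic_xadd(int *p, int v)
{
	return __sync_fetch_and_add(p, v);	/* stand-in body */
}

void demo(void)
{
	uint32_t old_field = 0U;
	int new_field = 0;

	/* atomic_xadd(&old_field, 1); */	/* -Wincompatible-pointer-types */
	atomic_xadd(&new_field, 1);		/* matches the new prototype */
}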

@@ -226,31 +226,29 @@ static inline int atomic_cmpxchg_int(unsigned int *p,
 #define atomic_load_acq_64	atomic_load_acq_long
 #define atomic_store_rel_64	atomic_store_rel_long
 
-/*
- * #define atomic_xadd_int(P, V) \
- * (return (*(unsigned long *)(P)); *(unsigned long *)(P) += (V);)
- */
-static inline int atomic_xadd_int(unsigned int *p, unsigned int v)
-{
-	__asm __volatile(BUS_LOCK "xaddl %0,%1"
-			:  "+r" (v), "+m" (*p)
-			:
-			:  "cc", "memory");
-	return v;
-}
+#define build_atomic_xadd(name, size, type, ptr, v)	\
+static inline type name(type *ptr, type v)		\
+{							\
+	asm volatile(BUS_LOCK "xadd" size " %0,%1"	\
+			:  "+r" (v), "+m" (*p)		\
+			:				\
+			:  "cc", "memory");		\
+	return v;					\
+}
+build_atomic_xadd(atomic_xadd, "l", int, p, v)
+build_atomic_xadd(atomic_xadd64, "q", long, p, v)
 
-static inline int atomic_add_return(int v, unsigned int *p)
-{
-	return v + atomic_xadd_int(p, v);
-}
-
-static inline int atomic_sub_return(int v, unsigned int *p)
-{
-	return atomic_xadd_int(p, -v) - v;
-}
+#define atomic_add_return(p, v)		( atomic_xadd(p, v) + v )
+#define atomic_sub_return(p, v)		( atomic_xadd(p, -v) - v )
 
-#define atomic_inc_return(v)		atomic_add_return(1, (v))
-#define atomic_dec_return(v)		atomic_sub_return(1, (v))
+#define atomic_inc_return(v)		atomic_add_return((v), 1)
+#define atomic_dec_return(v)		atomic_sub_return((v), 1)
+
+#define atomic_add64_return(p, v)	( atomic_xadd64(p, v) + v )
+#define atomic_sub64_return(p, v)	( atomic_xadd64(p, -v) - v )
+#define atomic_inc64_return(v)		atomic_add64_return((v), 1)
+#define atomic_dec64_return(v)		atomic_sub64_return((v), 1)
 
 static inline int
 atomic_cmpset_long(unsigned long *dst, unsigned long expect, unsigned long src)
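
For reference, a runnable sketch of how the build_atomic_xadd pattern
expands and how the derived *_return macros behave. The bodies below use
a GCC builtin instead of the inline lock-xadd assembly so the sketch runs
anywhere; the names mirror this diff, but the implementation is assumed,
not the real header:

#include <stdio.h>

/* Same shape as build_atomic_xadd above, minus the asm: each expansion
 * defines one fetch-and-add helper for one operand type. */
#define build_atomic_xadd(name, type)		\
static inline type name(type *p, type v)	\
{						\
	return __sync_fetch_and_add(p, v);	\
}
build_atomic_xadd(atomic_xadd, int)
build_atomic_xadd(atomic_xadd64, long)

/* Derived helpers, as in the diff: xadd returns the old value, so
 * adding v once more yields the post-add value. */
#define atomic_add_return(p, v)		( atomic_xadd(p, v) + v )
#define atomic_inc64_return(v)		( atomic_xadd64(v, 1) + 1 )

int main(void)
{
	int c = 5;
	long c64 = 5;

	printf("%d\n", atomic_add_return(&c, 3));	/* prints 8 */
	printf("%ld\n", atomic_inc64_return(&c64));	/* prints 6 */
	return 0;
}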