mirror of https://github.com/projectacrn/acrn-hypervisor.git
HV: treewide: Update the type of return value and parameters of atomic operations
There are many type conversions in the atomic operation invocations
reported by the static analysis tool; these conversions violate MISRA C.

To keep a uniform naming convention, rename the atomic operation
functions:

  atomic_set_int/long     --> atomic_set32/64
  atomic_clear_int/long   --> atomic_clear32/64
  atomic_load             --> atomic_load32
  atomic_store            --> atomic_store32
  atomic_swap             --> atomic_swap32
  atomic_readandclear     --> atomic_readandclear32
  atomic_inc              --> atomic_inc32
  atomic_dec              --> atomic_dec32
  atomic_cmpxchg          --> atomic_cmpxchg32
  atomic_xadd             --> atomic_xadd32

Update the types of atomic_load32/64, atomic_store32/64,
atomic_swap32/64 and atomic_cmpxchg32/64, and update the related
variables and callers.

Note: the return value and parameters of atomic_xadd32/64 keep
signed int/long, since callers pass negative values to atomic_xadd32/64.

V1-->V2: Add comments for atomic_set/clear to distinguish them from
bitmap_set/clear.

Signed-off-by: Xiangyang Wu <xiangyang.wu@intel.com>
Reviewed-by: Junjie.Mao <junjie.mao@intel.com>
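For reference, the renamed, width-suffixed interface reduces to prototypes
along these lines. This is a minimal sketch only, with GCC __atomic builtins
standing in for the hypervisor's lock-prefixed inline assembly; the unsigned
types follow the commit message, and xadd stays signed as noted above.

#include <stdint.h>

/* Sketch: builtins stand in for lock-prefixed inline assembly. */
static inline uint32_t atomic_load32(const uint32_t *ptr)
{
    return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
}

static inline void atomic_store64(uint64_t *ptr, uint64_t val)
{
    __atomic_store_n(ptr, val, __ATOMIC_SEQ_CST);
}

/* Returns the value observed in *ptr; the exchange succeeded
 * iff the return value equals 'old'. */
static inline uint64_t atomic_cmpxchg64(uint64_t *ptr, uint64_t old, uint64_t new)
{
    (void)__atomic_compare_exchange_n(ptr, &old, new, 0,
            __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return old;
}

/* xadd keeps signed parameters because callers pass negative deltas. */
static inline int32_t atomic_xadd32(int32_t *ptr, int32_t delta)
{
    return __atomic_fetch_add(ptr, delta, __ATOMIC_SEQ_CST);
}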
@@ -66,8 +66,8 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
     vcpu->vcpu_id = atomic_xadd16(&vm->hw.created_vcpus, 1U);
     /* vm->hw.vcpu_array[vcpu->vcpu_id] = vcpu; */
     atomic_store64(
-        (long *)&vm->hw.vcpu_array[vcpu->vcpu_id],
-        (long)vcpu);
+        (uint64_t *)&vm->hw.vcpu_array[vcpu->vcpu_id],
+        (uint64_t)vcpu);
 
     ASSERT(vcpu->vcpu_id < vm->hw.num_vcpus,
         "Allocated vcpu_id is out of range!");
@@ -243,8 +243,8 @@ void destroy_vcpu(struct vcpu *vcpu)
 
     /* vcpu->vm->hw.vcpu_array[vcpu->vcpu_id] = NULL; */
     atomic_store64(
-        (long *)&vcpu->vm->hw.vcpu_array[vcpu->vcpu_id],
-        (long)NULL);
+        (uint64_t *)&vcpu->vm->hw.vcpu_array[vcpu->vcpu_id],
+        (uint64_t)NULL);
 
     atomic_dec16(&vcpu->vm->hw.created_vcpus);
 
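Both hunks publish or retire a vcpu pointer through atomic_store64, and the
commit swaps the (long) casts for (uint64_t) to match the atomic API's new
unsigned prototypes. On ACRN's x86-64 (LP64) target the round trip is
value-preserving, which a small standalone check can illustrate (sketch only;
the uintptr_t hop is added here for portability):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    int object = 0;
    int *p = &object;

    /* publish side: pointer -> unsigned 64-bit payload */
    uint64_t raw = (uint64_t)(uintptr_t)p;
    /* consume side: payload -> pointer */
    int *q = (int *)(uintptr_t)raw;

    assert(q == p);
    return 0;
}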
@@ -300,13 +300,13 @@ void pause_vcpu(struct vcpu *vcpu, enum vcpu_state new_state)
     vcpu->prev_state = vcpu->state;
     vcpu->state = new_state;
 
-    if (atomic_load(&vcpu->running) == 1) {
+    if (atomic_load32(&vcpu->running) == 1U) {
         remove_vcpu_from_runqueue(vcpu);
         make_reschedule_request(vcpu);
         release_schedule_lock(vcpu->pcpu_id);
 
         if (vcpu->pcpu_id != pcpu_id) {
-            while (atomic_load(&vcpu->running) == 1)
+            while (atomic_load32(&vcpu->running) == 1U)
                 __asm__ __volatile("pause" ::: "memory");
         }
     } else {
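The loop above is the usual cross-CPU quiesce idiom: re-read the running flag
with an atomic load and issue PAUSE between polls so the spinning CPU backs
off. A standalone sketch of the pattern (hypothetical helper names, with a
GCC builtin standing in for atomic_load32):

#include <stdint.h>

/* PAUSE hint plus compiler barrier, as in the hunk above. */
static inline void cpu_pause(void)
{
    __asm__ __volatile__("pause" ::: "memory");
}

/* Hypothetical helper: spin until the target CPU clears *running. */
static inline void wait_vcpu_stopped(const uint32_t *running)
{
    while (__atomic_load_n(running, __ATOMIC_SEQ_CST) == 1U) {
        cpu_pause();
    }
}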
@@ -568,7 +568,7 @@ vlapic_get_lvt(struct vlapic *vlapic, uint32_t offset)
     uint32_t idx, val;
 
     idx = lvt_off_to_idx(offset);
-    val = atomic_load((int *)&vlapic->lvt_last[idx]);
+    val = atomic_load32(&vlapic->lvt_last[idx]);
     return val;
 }
 
@@ -636,7 +636,7 @@ vlapic_lvt_write_handler(struct vlapic *vlapic, uint32_t offset)
     }
 
     *lvtptr = val;
-    atomic_store((int *)&vlapic->lvt_last[idx], val);
+    atomic_store32(&vlapic->lvt_last[idx], val);
 }
 
 static void
@@ -1198,7 +1198,7 @@ vlapic_pending_intr(struct vlapic *vlapic, uint32_t *vecptr)
     /* i ranges effectively from 7 to 0 */
     for (i = 8U; i > 0U; ) {
         i--;
-        val = atomic_load((int *)&irrptr[i].val);
+        val = atomic_load32(&irrptr[i].val);
         bitpos = (uint32_t)fls32(val);
         if (bitpos != INVALID_BIT_INDEX) {
             vector = i * 32U + bitpos;
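The surrounding loop walks the eight 32-bit IRR words from high to low and
uses fls32 to pick the most significant set bit, so the first hit is the
highest-priority pending vector. A self-contained sketch of that scan (the
INVALID_BIT_INDEX value and the fls32 body here are assumptions made for the
sketch):

#include <stdint.h>

#define INVALID_BIT_INDEX 0xffffU /* assumed sentinel for this sketch */

/* Index of the highest set bit, or INVALID_BIT_INDEX for 0. */
static uint32_t fls32(uint32_t value)
{
    return (value == 0U) ? INVALID_BIT_INDEX
                         : (uint32_t)(31 - __builtin_clz(value));
}

/* Highest pending vector across eight 32-bit IRR words. */
static uint32_t highest_pending(const uint32_t irr[8])
{
    for (uint32_t i = 8U; i > 0U; ) {
        i--;
        uint32_t bitpos = fls32(irr[i]);
        if (bitpos != INVALID_BIT_INDEX) {
            return (i * 32U) + bitpos;
        }
    }
    return INVALID_BIT_INDEX; /* nothing pending */
}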
@@ -1233,7 +1233,7 @@ vlapic_intr_accepted(struct vlapic *vlapic, uint32_t vector)
     idx = vector / 32U;
 
     irrptr = &lapic->irr[0];
-    atomic_clear_int(&irrptr[idx].val, 1U << (vector % 32U));
+    atomic_clear32(&irrptr[idx].val, 1U << (vector % 32U));
     vlapic_dump_irr(vlapic, "vlapic_intr_accepted");
 
     isrptr = &lapic->isr[0];
@@ -2183,8 +2183,8 @@ apicv_set_intr_ready(struct vlapic *vlapic, uint32_t vector, __unused bool level
     idx = vector / 64U;
     mask = 1UL << (vector % 64U);
 
-    atomic_set_long(&pir_desc->pir[idx], mask);
-    notify = (atomic_cmpxchg64((long *)&pir_desc->pending, 0, 1) == 0) ? 1 : 0;
+    atomic_set64(&pir_desc->pir[idx], mask);
+    notify = (atomic_cmpxchg64(&pir_desc->pending, 0UL, 1UL) == 0UL) ? 1 : 0;
     return notify;
 }
 
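The pending/notify pair above is an edge-triggered doorbell: atomic_set64
records the vector in the PIR, and the compare-and-swap lets only the writer
that flips pending from 0 to 1 send a notification; apicv_inject_pir below
performs the matching 1 -> 0 claim on the consumer side. A minimal sketch of
both transitions (GCC builtin standing in for atomic_cmpxchg64):

#include <stdint.h>

static uint64_t pending;

/* Producer: returns 1 only on the 0 -> 1 transition, so at most one
 * notification is raised while pending stays set. */
int mark_pending(void)
{
    uint64_t expected = 0UL;
    return __atomic_compare_exchange_n(&pending, &expected, 1UL, 0,
            __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ? 1 : 0;
}

/* Consumer: claims the doorbell with the matching 1 -> 0 transition. */
int claim_pending(void)
{
    uint64_t expected = 1UL;
    return __atomic_compare_exchange_n(&pending, &expected, 0UL, 0,
            __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ? 1 : 0;
}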
@@ -2198,7 +2198,7 @@ apicv_pending_intr(struct vlapic *vlapic, __unused uint32_t *vecptr)
 
     pir_desc = vlapic->pir_desc;
 
-    pending = atomic_load64((long *)&pir_desc->pending);
+    pending = atomic_load64(&pir_desc->pending);
     if (pending == 0U) {
         return 0;
     }
@@ -2308,7 +2308,7 @@ apicv_inject_pir(struct vlapic *vlapic)
     struct lapic_reg *irr = NULL;
 
     pir_desc = vlapic->pir_desc;
-    if (atomic_cmpxchg64((long *)(&pir_desc->pending), 1, 0) != 1) {
+    if (atomic_cmpxchg64(&pir_desc->pending, 1UL, 0UL) != 1UL) {
        return;
     }
 
@@ -2317,7 +2317,7 @@ apicv_inject_pir(struct vlapic *vlapic)
     irr = &lapic->irr[0];
 
     for (i = 0U; i < 4U; i++) {
-        val = atomic_readandclear64((long *)&pir_desc->pir[i]);
+        val = atomic_readandclear64(&pir_desc->pir[i]);
         if (val != 0UL) {
             irr[i * 2U].val |= (uint32_t)val;
             irr[(i * 2U) + 1U].val |= (uint32_t)(val >> 32);
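Each drained 64-bit PIR word feeds two 32-bit IRR registers: the low half is
ORed into irr[i * 2U].val and the high half into irr[(i * 2U) + 1U].val. A
quick standalone check of that split:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t val = 0xAABBCCDD11223344UL;
    uint32_t lo = (uint32_t)val;         /* -> irr[i * 2U].val */
    uint32_t hi = (uint32_t)(val >> 32); /* -> irr[(i * 2U) + 1U].val */

    assert(lo == 0x11223344U);
    assert(hi == 0xAABBCCDDU);
    return 0;
}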