mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-09-24 02:08:04 +00:00
HV:treewide:Update the type of return value and parameters of atomic operations
There are many type conversions in the atomic operation invocations reported by the static analysis tool. These type conversions violate MISRA C. To keep a uniform naming convention, rename atomic operation function names: atomic_set_int/long --> atomic_set32/64; atomic_clear_int/long --> atomic_clear32/64; atomic_load --> atomic_load32; atomic_store --> atomic_store32; atomic_swap --> atomic_swap32; atomic_readandclear --> atomic_readandclear32; atomic_inc --> atomic_inc32; atomic_dec --> atomic_dec32; atomic_cmpxchg --> atomic_cmpxchg32; atomic_xadd --> atomic_xadd32. Update the type of atomic_load32/64, atomic_store32/64, atomic_swap32/64, atomic_cmpxchg32/64. Update related variables and callers. Note: the type of return value and parameters of atomic_xadd32/64 still keep signed int/long since callers pass negative variables to atomic_xadd32/64. V1-->V2: Add comments for atomic_set/clear to differentiate them from bitmap_set/clear. Signed-off-by: Xiangyang Wu <xiangyang.wu@intel.com> Reviewed-by: Junjie.Mao <junjie.mao@intel.com>
This commit is contained in:
@@ -347,7 +347,7 @@ static void complete_request(struct vcpu *vcpu)
|
||||
req_buf = (union vhm_request_buffer *)
|
||||
vcpu->vm->sw.io_shared_page;
|
||||
req_buf->req_queue[vcpu->vcpu_id].valid = false;
|
||||
atomic_store(&vcpu->ioreq_pending, 0);
|
||||
atomic_store32(&vcpu->ioreq_pending, 0U);
|
||||
|
||||
return;
|
||||
}
|
||||
|
@@ -79,7 +79,7 @@ int32_t acrn_insert_request_wait(struct vcpu *vcpu, struct vhm_request *req)
|
||||
* TODO: when pause_vcpu changed to switch vcpu out directlly, we
|
||||
* should fix the race issue between req.valid = true and vcpu pause
|
||||
*/
|
||||
atomic_store(&vcpu->ioreq_pending, 1);
|
||||
atomic_store32(&vcpu->ioreq_pending, 1U);
|
||||
pause_vcpu(vcpu, VCPU_PAUSED);
|
||||
|
||||
/* Must clear the signal before we mark req valid
|
||||
|
@@ -75,7 +75,7 @@ alloc_entry(struct vm *vm, enum ptdev_intr_type type)
|
||||
INIT_LIST_HEAD(&entry->softirq_node);
|
||||
INIT_LIST_HEAD(&entry->entry_node);
|
||||
|
||||
atomic_clear_int(&entry->active, ACTIVE_FLAG);
|
||||
atomic_clear32(&entry->active, ACTIVE_FLAG);
|
||||
list_add(&entry->entry_node, &ptdev_list);
|
||||
|
||||
return entry;
|
||||
@@ -140,7 +140,7 @@ ptdev_activate_entry(struct ptdev_remapping_info *entry, uint32_t phys_irq,
|
||||
ASSERT(node != NULL, "dev register failed");
|
||||
entry->node = node;
|
||||
|
||||
atomic_set_int(&entry->active, ACTIVE_FLAG);
|
||||
atomic_set32(&entry->active, ACTIVE_FLAG);
|
||||
}
|
||||
|
||||
void
|
||||
@@ -148,7 +148,7 @@ ptdev_deactivate_entry(struct ptdev_remapping_info *entry)
|
||||
{
|
||||
spinlock_rflags;
|
||||
|
||||
atomic_clear_int(&entry->active, ACTIVE_FLAG);
|
||||
atomic_clear32(&entry->active, ACTIVE_FLAG);
|
||||
|
||||
unregister_handler_common(entry->node);
|
||||
entry->node = NULL;
|
||||
|
@@ -113,7 +113,7 @@ static void context_switch_out(struct vcpu *vcpu)
|
||||
/* cancel event(int, gp, nmi and exception) injection */
|
||||
cancel_event_injection(vcpu);
|
||||
|
||||
atomic_store(&vcpu->running, 0);
|
||||
atomic_store32(&vcpu->running, 0U);
|
||||
/* do prev vcpu context switch out */
|
||||
/* For now, we don't need to invalid ept.
|
||||
* But if we have more than one vcpu on one pcpu,
|
||||
@@ -131,7 +131,7 @@ static void context_switch_in(struct vcpu *vcpu)
|
||||
return;
|
||||
}
|
||||
|
||||
atomic_store(&vcpu->running, 1);
|
||||
atomic_store32(&vcpu->running, 1U);
|
||||
/* FIXME:
|
||||
* Now, we don't need to load new vcpu VMCS because
|
||||
* we only do switch between vcpu loop and idle loop.
|
||||
|
Reference in New Issue
Block a user