Mirror of https://github.com/projectacrn/acrn-hypervisor.git, synced 2025-09-21 08:50:27 +00:00
hv: refine atomic_load/store_xxx name
rename atomic_load/store_xxx32 to atomic_load/store
rename atomic_load/store_xxx64 to atomic_load64/store64

Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
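For orientation, here is a minimal sketch of what the renamed helpers could look like after this change, with prototypes inferred from the call sites in the diff below. The GCC __atomic builtins are only a stand-in; the actual ACRN implementation is not part of this excerpt.

/*
 * Sketch only: plausible prototypes for the renamed helpers, inferred from
 * how they are called in this diff. The __atomic builtins are a stand-in,
 * not the real hypervisor implementation.
 */
static inline int atomic_load(int *p)
{
	return __atomic_load_n(p, __ATOMIC_SEQ_CST);
}

static inline void atomic_store(int *p, int v)
{
	__atomic_store_n(p, v, __ATOMIC_SEQ_CST);
}

static inline long atomic_load64(long *p)
{
	return __atomic_load_n(p, __ATOMIC_SEQ_CST);
}

static inline void atomic_store64(long *p, long v)
{
	__atomic_store_n(p, v, __ATOMIC_SEQ_CST);
}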
@@ -98,7 +98,7 @@ is_entry_invalid(struct ptdev_remapping_info *entry)
 static inline bool
 is_entry_active(struct ptdev_remapping_info *entry)
 {
-	return atomic_load_acq_int(&entry->active) == ACTIVE_FLAG;
+	return atomic_load((int *)&entry->active) == ACTIVE_FLAG;
 }
 
 /* require ptdev_lock protect */
@@ -82,9 +82,9 @@ int create_vcpu(int cpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
 	 */
 	vcpu->vcpu_id = atomic_xadd(&vm->hw.created_vcpus, 1);
 	/* vm->hw.vcpu_array[vcpu->vcpu_id] = vcpu; */
-	atomic_store_rel_64(
-		(unsigned long *)&vm->hw.vcpu_array[vcpu->vcpu_id],
-		(unsigned long)vcpu);
+	atomic_store64(
+		(long *)&vm->hw.vcpu_array[vcpu->vcpu_id],
+		(long)vcpu);
 
 	ASSERT(vcpu->vcpu_id < vm->hw.num_vcpus,
 		"Allocated vcpu_id is out of range!");
@@ -221,9 +221,9 @@ int destroy_vcpu(struct vcpu *vcpu)
 	ASSERT(vcpu != NULL, "Incorrect arguments");
 
 	/* vcpu->vm->hw.vcpu_array[vcpu->vcpu_id] = NULL; */
-	atomic_store_rel_64(
-		(unsigned long *)&vcpu->vm->hw.vcpu_array[vcpu->vcpu_id],
-		(unsigned long)NULL);
+	atomic_store64(
+		(long *)&vcpu->vm->hw.vcpu_array[vcpu->vcpu_id],
+		(long)NULL);
 
 	atomic_dec(&vcpu->vm->hw.created_vcpus);
 
@@ -282,13 +282,13 @@ void pause_vcpu(struct vcpu *vcpu, enum vcpu_state new_state)
 	vcpu->state = new_state;
 
 	get_schedule_lock(vcpu->pcpu_id);
-	if (atomic_load_acq_32(&vcpu->running) == 1) {
+	if (atomic_load(&vcpu->running) == 1) {
 		remove_vcpu_from_runqueue(vcpu);
 		make_reschedule_request(vcpu);
 		release_schedule_lock(vcpu->pcpu_id);
 
 		if (vcpu->pcpu_id != pcpu_id) {
-			while (atomic_load_acq_32(&vcpu->running) == 1)
+			while (atomic_load(&vcpu->running) == 1)
 				__asm__ __volatile("pause" ::: "memory");
 		}
 	} else {
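The pause_vcpu() hunk above shows the idiom these loads serve: poll a flag that another CPU will clear, relaxing the core with PAUSE between iterations. A self-contained sketch of that idiom follows; atomic_load_stub and wait_until_cleared are assumed names standing in for the hypervisor's atomic_load and the loop in pause_vcpu(), not ACRN code.

/* Assumed stand-in for the hypervisor's atomic_load(); not ACRN code. */
static inline int atomic_load_stub(int *p)
{
	return __atomic_load_n(p, __ATOMIC_ACQUIRE);
}

/* Spin until *flag is no longer 1, as pause_vcpu() does for vcpu->running
 * when the vCPU is executing on another physical CPU. */
static inline void wait_until_cleared(int *flag)
{
	while (atomic_load_stub(flag) == 1)
		__asm__ __volatile__("pause" ::: "memory");
}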
@@ -484,7 +484,7 @@ vlapic_get_lvt(struct vlapic *vlapic, uint32_t offset)
 	uint32_t val;
 
 	idx = lvt_off_to_idx(offset);
-	val = atomic_load_acq_32(&vlapic->lvt_last[idx]);
+	val = atomic_load((int *)&vlapic->lvt_last[idx]);
 	return val;
 }
 
@@ -547,7 +547,7 @@ vlapic_lvt_write_handler(struct vlapic *vlapic, uint32_t offset)
 		vlapic_update_lvtt(vlapic, val);
 
 	*lvtptr = val;
-	atomic_store_rel_32(&vlapic->lvt_last[idx], val);
+	atomic_store((int *)&vlapic->lvt_last[idx], val);
 }
 
 static void
@@ -1097,7 +1097,7 @@ vlapic_pending_intr(struct vlapic *vlapic, int *vecptr)
 	irrptr = &lapic->irr[0];
 
 	for (i = 7; i >= 0; i--) {
-		val = atomic_load_acq_int(&irrptr[i].val);
+		val = atomic_load((int *)&irrptr[i].val);
 		bitpos = fls(val);
 		if (bitpos >= 0) {
 			vector = i * 32 + bitpos;
@@ -2007,7 +2007,7 @@ apicv_pending_intr(struct vlapic *vlapic, __unused int *vecptr)
 
 	pir_desc = vlapic->pir_desc;
 
-	pending = atomic_load_acq_long(&pir_desc->pending);
+	pending = atomic_load64((long *)&pir_desc->pending);
 	if (!pending)
 		return 0;
 
@@ -127,7 +127,7 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
 	snprintf(&vm->attr.name[0], MAX_VM_NAME_LEN, "vm_%d",
 		vm->attr.id);
 
-	atomic_store_rel_int(&vm->hw.created_vcpus, 0);
+	atomic_store(&vm->hw.created_vcpus, 0);
 
 	/* gpa_lowtop are used for system start up */
 	vm->hw.gpa_lowtop = 0;