hv: refine atomic_load/store_xxx names

rename atomic_load_xxx32/atomic_store_xxx32 to atomic_load/atomic_store
rename atomic_load_xxx64/atomic_store_xxx64 to atomic_load64/atomic_store64

Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Li, Fei1
2018-05-17 10:11:43 +08:00
committed by lijinxia
parent 336a8883db
commit 1f3da93e74
10 changed files with 47 additions and 78 deletions

View File

@@ -363,7 +363,7 @@ static void complete_request(struct vcpu *vcpu)
req_buf = (struct vhm_request_buffer *)
vcpu->vm->sw.io_shared_page;
req_buf->req_queue[vcpu->vcpu_id].valid = false;
atomic_store_rel_32(&vcpu->ioreq_pending, 0);
atomic_store(&vcpu->ioreq_pending, 0);
return;
}
@@ -900,7 +900,7 @@ int acrn_insert_request_wait(struct vcpu *vcpu, struct vhm_request *req)
fire_vhm_interrupt();
/* pause vcpu, wait for VHM to handle the MMIO request */
atomic_store_rel_32(&vcpu->ioreq_pending, 1);
atomic_store(&vcpu->ioreq_pending, 1);
pause_vcpu(vcpu, VCPU_PAUSED);
return 0;

View File

@@ -146,7 +146,7 @@ static void context_switch_out(struct vcpu *vcpu)
/* cancel event(int, gp, nmi and exception) injection */
cancel_event_injection(vcpu);
atomic_store_rel_32(&vcpu->running, 0);
atomic_store(&vcpu->running, 0);
/* do prev vcpu context switch out */
/* For now, we don't need to invalid ept.
* But if we have more than one vcpu on one pcpu,
@@ -163,7 +163,7 @@ static void context_switch_in(struct vcpu *vcpu)
if (vcpu == NULL)
return;
atomic_store_rel_32(&vcpu->running, 1);
atomic_store(&vcpu->running, 1);
/* FIXME:
* Now, we don't need to load new vcpu VMCS because
* we only do switch between vcpu loop and idle loop.