mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-08-02 16:30:12 +00:00
hv: io_req: refine vhm_req status setting
Although vhm_req status can be updated by HV and DM on different CPUs, each side only changes vhm_req status after it detects that the status has been updated by the other, so vhm_req status will not be misconfigured. However, before HV sets vhm_req status to REQ_STATE_PENDING, the filling of the vhm_req buffer must be visible to DM. Add a write memory barrier to guarantee this. Tracked-On: #1842 Signed-off-by: Li, Fei1 <fei1.li@intel.com>
This commit is contained in:
parent
1ea3052f80
commit
5930e96d12
@ -6,7 +6,6 @@
|
|||||||
#include <vm.h>
|
#include <vm.h>
|
||||||
#include <irq.h>
|
#include <irq.h>
|
||||||
#include <errno.h>
|
#include <errno.h>
|
||||||
#include <atomic.h>
|
|
||||||
#include <ept.h>
|
#include <ept.h>
|
||||||
#include <logmsg.h>
|
#include <logmsg.h>
|
||||||
|
|
||||||
@ -113,6 +112,9 @@ int32_t acrn_insert_request(struct acrn_vcpu *vcpu, const struct io_request *io_
|
|||||||
pause_vcpu(vcpu, VCPU_PAUSED);
|
pause_vcpu(vcpu, VCPU_PAUSED);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Before updating the vhm_req state, enforce all fill vhm_req operations done */
|
||||||
|
cpu_write_memory_barrier();
|
||||||
|
|
||||||
/* Must clear the signal before we mark req as pending
|
/* Must clear the signal before we mark req as pending
|
||||||
* Once we mark it pending, VHM may process req and signal us
|
* Once we mark it pending, VHM may process req and signal us
|
||||||
* before we perform upcall.
|
* before we perform upcall.
|
||||||
@ -168,7 +170,7 @@ uint32_t get_vhm_req_state(struct acrn_vm *vm, uint16_t vhm_req_id)
|
|||||||
} else {
|
} else {
|
||||||
stac();
|
stac();
|
||||||
vhm_req = &req_buf->req_queue[vhm_req_id];
|
vhm_req = &req_buf->req_queue[vhm_req_id];
|
||||||
state = atomic_load32(&vhm_req->processed);
|
state = vhm_req->processed;
|
||||||
clac();
|
clac();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -184,7 +186,13 @@ void set_vhm_req_state(struct acrn_vm *vm, uint16_t vhm_req_id, uint32_t state)
|
|||||||
if (req_buf != NULL) {
|
if (req_buf != NULL) {
|
||||||
stac();
|
stac();
|
||||||
vhm_req = &req_buf->req_queue[vhm_req_id];
|
vhm_req = &req_buf->req_queue[vhm_req_id];
|
||||||
atomic_store32(&vhm_req->processed, state);
|
/*
|
||||||
|
* HV will only set processed to REQ_STATE_PENDING or REQ_STATE_FREE.
|
||||||
|
* The reason we don't need an sfence here is that even if the SOS/DM sees the previous state,
|
||||||
|
* the only side effect is that it will defer the processing of the new IOReq.
|
||||||
|
* It won't lead to wrong processing.
|
||||||
|
*/
|
||||||
|
vhm_req->processed = state;
|
||||||
clac();
|
clac();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -245,7 +253,12 @@ static void complete_ioreq(struct acrn_vcpu *vcpu, struct io_request *io_req)
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
atomic_store32(&vhm_req->processed, REQ_STATE_FREE);
|
|
||||||
|
/*
|
||||||
|
* Only HV will check whether processed is REQ_STATE_FREE on a per-vCPU basis before injecting an ioreq.
|
||||||
|
* Only HV will set processed to REQ_STATE_FREE when ioreq is done.
|
||||||
|
*/
|
||||||
|
vhm_req->processed = REQ_STATE_FREE;
|
||||||
clac();
|
clac();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -351,6 +351,12 @@ static inline void cpu_sp_write(uint64_t *stack_ptr)
|
|||||||
asm volatile ("movq %0, %%rsp" : : "r"(rsp));
|
asm volatile ("movq %0, %%rsp" : : "r"(rsp));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Synchronizes all write accesses to memory */
|
||||||
|
static inline void cpu_write_memory_barrier(void)
|
||||||
|
{
|
||||||
|
asm volatile ("sfence\n" : : : "memory");
|
||||||
|
}
|
||||||
|
|
||||||
/* Synchronizes all read and write accesses to/from memory */
|
/* Synchronizes all read and write accesses to/from memory */
|
||||||
static inline void cpu_memory_barrier(void)
|
static inline void cpu_memory_barrier(void)
|
||||||
{
|
{
|
||||||
|
Loading…
Reference in New Issue
Block a user