From 5930e96d12af790427605e7f75b2c96e37df9bf7 Mon Sep 17 00:00:00 2001 From: "Li, Fei1" Date: Wed, 3 Jul 2019 00:21:43 +0800 Subject: [PATCH] hv: io_req: refine vhm_req status setting In spite of vhm_req status could be updated in HV and DM on different CPUs, they only change vhm_req status when they detect vhm_req status has been updated by each other. So vhm_req status will not been misconfigured. However, before HV sets vhm_req status to REQ_STATE_PENDING, vhm_req buffer filling should be visible to DM. Add a write memory barrier to guarantee this. Tracked-On: #1842 Signed-off-by: Li, Fei1 --- hypervisor/dm/io_req.c | 21 +++++++++++++++++---- hypervisor/include/arch/x86/cpu.h | 6 ++++++ 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/hypervisor/dm/io_req.c b/hypervisor/dm/io_req.c index e16509b9a..168f00c1d 100644 --- a/hypervisor/dm/io_req.c +++ b/hypervisor/dm/io_req.c @@ -6,7 +6,6 @@ #include #include #include -#include #include #include @@ -113,6 +112,9 @@ int32_t acrn_insert_request(struct acrn_vcpu *vcpu, const struct io_request *io_ pause_vcpu(vcpu, VCPU_PAUSED); } + /* Before updating the vhm_req state, enforce all fill vhm_req operations done */ + cpu_write_memory_barrier(); + /* Must clear the signal before we mark req as pending * Once we mark it pending, VHM may process req and signal us * before we perform upcall. @@ -168,7 +170,7 @@ uint32_t get_vhm_req_state(struct acrn_vm *vm, uint16_t vhm_req_id) } else { stac(); vhm_req = &req_buf->req_queue[vhm_req_id]; - state = atomic_load32(&vhm_req->processed); + state = vhm_req->processed; clac(); } @@ -184,7 +186,13 @@ void set_vhm_req_state(struct acrn_vm *vm, uint16_t vhm_req_id, uint32_t state) if (req_buf != NULL) { stac(); vhm_req = &req_buf->req_queue[vhm_req_id]; - atomic_store32(&vhm_req->processed, state); + /* + * HV will only set processed to REQ_STATE_PENDING or REQ_STATE_FREE. 
+ * The reason we don't need an sfence here is that even if the SOS/DM sees the previous state, + * the only side effect is that it will defer the processing of the new IOReq. + * It won't lead to wrong processing. + */ + vhm_req->processed = state; clac(); } } @@ -245,7 +253,12 @@ static void complete_ioreq(struct acrn_vcpu *vcpu, struct io_request *io_req) break; } } - atomic_store32(&vhm_req->processed, REQ_STATE_FREE); + + /* + * Only HV will check whether processed is REQ_STATE_FREE on a per-vCPU basis before injecting an ioreq. + * Only HV will set processed to REQ_STATE_FREE when ioreq is done. + */ + vhm_req->processed = REQ_STATE_FREE; clac(); } diff --git a/hypervisor/include/arch/x86/cpu.h b/hypervisor/include/arch/x86/cpu.h index 489cc1cfb..e5cf48812 100644 --- a/hypervisor/include/arch/x86/cpu.h +++ b/hypervisor/include/arch/x86/cpu.h @@ -351,6 +351,12 @@ static inline void cpu_sp_write(uint64_t *stack_ptr) asm volatile ("movq %0, %%rsp" : : "r"(rsp)); } +/* Synchronizes all write accesses to memory */ +static inline void cpu_write_memory_barrier(void) +{ + asm volatile ("sfence\n" : : : "memory"); +} + /* Synchronizes all read and write accesses to/from memory */ static inline void cpu_memory_barrier(void) {