hv: refine IOREQ state operation functions in hypervisor

1) add functions to set/get VHM request state.
2) modify 'complete_ioreq()' in io.c
3) update the caller code

Tracked-On: #2056
Signed-off-by: Yonghua Huang <yonghua.huang@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
This commit is contained in:
Yonghua Huang 2018-12-13 02:01:50 +08:00 committed by wenlingz
parent c89d6e6526
commit 57dfc7de05
4 changed files with 85 additions and 58 deletions

View File

@@ -8,8 +8,27 @@
#include "guest/instr_emul.h"
static void complete_ioreq(struct vhm_request *vhm_req)
/*
 * Finish handling of a device-model-emulated I/O request for 'vcpu'.
 *
 * If 'io_req' is non-NULL, the value produced by the device model is
 * copied out of this vCPU's slot in the shared VHM request page into
 * 'io_req' (PIO or MMIO, depending on the request type).  Callers pass
 * NULL when no copy-back is needed (zombie vCPU, or REQ_WP requests —
 * see the callers in emulate_io_post).  Finally the slot's state is
 * reset to REQ_STATE_FREE so VHM may reuse it.
 *
 * NOTE(review): the switch keys on vcpu->req.type while writing into
 * io_req; the visible callers pass io_req == &vcpu->req, so the two
 * agree — confirm if a future caller ever passes a different object.
 */
static void complete_ioreq(struct acrn_vcpu *vcpu, struct io_request *io_req)
{
union vhm_request_buffer *req_buf = NULL;
struct vhm_request *vhm_req;
/* Locate this vCPU's slot in the VM's shared I/O request page. */
req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
vhm_req = &req_buf->req_queue[vcpu->vcpu_id];
if (io_req != NULL) {
switch (vcpu->req.type) {
case REQ_PORTIO:
io_req->reqs.pio.value = vhm_req->reqs.pio.value;
break;
case REQ_MMIO:
io_req->reqs.mmio.value = vhm_req->reqs.mmio.value;
break;
default:
/* Other request types carry no value to copy back. */
break;
}
}
/* Release the slot only after any copy-back above is done. */
atomic_store32(&vhm_req->processed, REQ_STATE_FREE);
}
@@ -47,19 +66,9 @@ emulate_pio_post(struct acrn_vcpu *vcpu, const struct io_request *io_req)
*/
/*
 * Post-work after VHM (the device model) completed a port-I/O request.
 * NOTE(review): this span is a unified-diff hunk with the +/- markers
 * stripped, so removed and added lines appear interleaved below; it is
 * annotated rather than restyled.
 */
static void dm_emulate_pio_post(struct acrn_vcpu *vcpu)
{
/* Removed by this commit: manual shared-page lookup and value copy. */
uint16_t cur = vcpu->vcpu_id;
union vhm_request_buffer *req_buf = NULL;
struct io_request *io_req = &vcpu->req;
struct pio_request *pio_req = &io_req->reqs.pio;
struct vhm_request *vhm_req;
req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
vhm_req = &req_buf->req_queue[cur];
pio_req->value = vhm_req->reqs.pio.value;
/* VHM emulation data already copy to req, mark to free slot now */
/* Removed call: took the raw slot pointer. */
complete_ioreq(vhm_req);
/* Added call: copies the PIO value and frees the slot itself. */
complete_ioreq(vcpu, io_req);
emulate_pio_post(vcpu, io_req);
}
@@ -98,19 +107,9 @@ void emulate_mmio_post(const struct acrn_vcpu *vcpu, const struct io_request *io
*/
/*
 * Post-work after VHM (the device model) completed an MMIO request.
 * NOTE(review): this span is a unified-diff hunk with the +/- markers
 * stripped, so removed and added lines appear interleaved below; it is
 * annotated rather than restyled.
 */
void dm_emulate_mmio_post(struct acrn_vcpu *vcpu)
{
/* Removed by this commit: manual shared-page lookup and value copy. */
uint16_t cur = vcpu->vcpu_id;
struct io_request *io_req = &vcpu->req;
struct mmio_request *mmio_req = &io_req->reqs.mmio;
union vhm_request_buffer *req_buf;
struct vhm_request *vhm_req;
req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
vhm_req = &req_buf->req_queue[cur];
mmio_req->value = vhm_req->reqs.mmio.value;
/* VHM emulation data already copy to req, mark to free slot now */
/* Removed call: took the raw slot pointer. */
complete_ioreq(vhm_req);
/* Added call: copies the MMIO value and frees the slot itself. */
complete_ioreq(vcpu, io_req);
emulate_mmio_post(vcpu, io_req);
}
@@ -133,13 +132,8 @@ static void io_instr_dest_handler(struct io_request *io_req)
*/
void emulate_io_post(struct acrn_vcpu *vcpu)
{
union vhm_request_buffer *req_buf;
struct vhm_request *vhm_req;
req_buf = (union vhm_request_buffer *)vcpu->vm->sw.io_shared_page;
vhm_req = &req_buf->req_queue[vcpu->vcpu_id];
if (atomic_load32(&vhm_req->processed) != REQ_STATE_COMPLETE) {
if (get_vhm_req_state(vcpu->vm, vcpu->vcpu_id)
!= REQ_STATE_COMPLETE) {
return;
}
@@ -148,7 +142,7 @@ void emulate_io_post(struct acrn_vcpu *vcpu)
* mark ioreq done and don't resume vcpu.
*/
if (vcpu->state == VCPU_ZOMBIE) {
complete_ioreq(vhm_req);
complete_ioreq(vcpu, NULL);
return;
}
@@ -171,7 +165,7 @@ void emulate_io_post(struct acrn_vcpu *vcpu)
default:
/* REQ_WP can only be triggered on writes which do not need
* post-work. Just mark the ioreq done. */
complete_ioreq(vhm_req);
complete_ioreq(vcpu, NULL);
break;
}

View File

@@ -425,7 +425,6 @@ int32_t hcall_set_ioreq_buffer(struct acrn_vm *vm, uint16_t vmid, uint64_t param
uint64_t hpa;
struct acrn_set_ioreq_buffer iobuf;
struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
union vhm_request_buffer *req_buf;
uint16_t i;
if (target_vm == NULL) {
@@ -451,10 +450,8 @@ int32_t hcall_set_ioreq_buffer(struct acrn_vm *vm, uint16_t vmid, uint64_t param
}
target_vm->sw.io_shared_page = hpa2hva(hpa);
req_buf = target_vm->sw.io_shared_page;
for (i = 0U; i < VHM_REQUEST_MAX; i++) {
atomic_store32(&req_buf->req_queue[i].processed, REQ_STATE_FREE);
set_vhm_req_state(target_vm, i, REQ_STATE_FREE);
}
return 0;

View File

@@ -65,30 +65,15 @@ static void acrn_print_request(uint16_t vcpu_id, const struct vhm_request *req)
/*
 * Reset every VHM request slot of 'vm' back to REQ_STATE_FREE.
 * NOTE(review): diff hunk — the old and new loop bodies both appear
 * below (markers stripped); annotated rather than restyled.
 */
void reset_vm_ioreqs(struct acrn_vm *vm)
{
uint16_t i;
/* Removed by this commit: direct shared-page access. */
union vhm_request_buffer *req_buf;
req_buf = vm->sw.io_shared_page;
for (i = 0U; i < VHM_REQUEST_MAX; i++) {
/* Removed line: raw atomic store into the slot. */
atomic_store32(&req_buf->req_queue[i].processed, REQ_STATE_FREE);
/* Added line: go through the new state accessor instead. */
set_vhm_req_state(vm, i, REQ_STATE_FREE);
}
}
/*
 * Check whether this vCPU's VHM request slot is in REQ_STATE_COMPLETE.
 * NOTE(review): diff hunk — both the old signature/body and the new
 * one-liner appear below (markers stripped); annotated, not restyled.
 */
/* Old signature (removed). */
static bool has_complete_ioreq(struct acrn_vcpu *vcpu)
/* New signature (added): marked inline. */
static inline bool has_complete_ioreq(struct acrn_vcpu *vcpu)
{
/* Old body (removed): manual shared-page lookup with NULL check. */
union vhm_request_buffer *req_buf = NULL;
struct vhm_request *vhm_req;
struct acrn_vm *vm;
vm = vcpu->vm;
req_buf = (union vhm_request_buffer *)vm->sw.io_shared_page;
if (req_buf != NULL) {
vhm_req = &req_buf->req_queue[vcpu->vcpu_id];
if (atomic_load32(&vhm_req->processed) == REQ_STATE_COMPLETE) {
return true;
}
}
return false;
/* New body (added): delegate to the state accessor. */
return (get_vhm_req_state(vcpu->vm, vcpu->vcpu_id) == REQ_STATE_COMPLETE);
}
/**
@@ -132,13 +117,13 @@ int32_t acrn_insert_request_wait(struct acrn_vcpu *vcpu, const struct io_request
return -EINVAL;
}
ASSERT(get_vhm_req_state(vcpu->vm, vcpu->vcpu_id) == REQ_STATE_FREE,
"VHM request buffer is busy");
req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
cur = vcpu->vcpu_id;
vhm_req = &req_buf->req_queue[cur];
ASSERT(atomic_load32(&vhm_req->processed) == REQ_STATE_FREE,
"VHM request buffer is busy");
/* ACRN insert request to VHM and inject upcall */
vhm_req->type = io_req->type;
(void)memcpy_s(&vhm_req->reqs, sizeof(union vhm_io_request),
@@ -158,7 +143,7 @@ int32_t acrn_insert_request_wait(struct acrn_vcpu *vcpu, const struct io_request
* before we perform upcall.
* because VHM can work in pulling mode without wait for upcall
*/
atomic_store32(&vhm_req->processed, REQ_STATE_PENDING);
set_vhm_req_state(vcpu->vm, vcpu->vcpu_id, REQ_STATE_PENDING);
acrn_print_request(vcpu->vcpu_id, vhm_req);
@@ -167,3 +152,34 @@ int32_t acrn_insert_request_wait(struct acrn_vcpu *vcpu, const struct io_request
return 0;
}
/**
 * @brief Fetch the current state of one VHM request slot.
 *
 * Looks up the slot 'vhm_req_id' in the VM's shared I/O request page
 * and returns its 'processed' field via an atomic load.
 *
 * @param vm         Target VM context.
 * @param vhm_req_id VHM request ID (slot index, normally the vCPU id).
 *
 * @return The slot's state (e.g. REQ_STATE_FREE/PENDING/COMPLETE), or
 *         (uint32_t)-1 when the VM has no shared I/O page registered.
 */
uint32_t get_vhm_req_state(struct acrn_vm *vm, uint16_t vhm_req_id)
{
	union vhm_request_buffer *buf;

	buf = (union vhm_request_buffer *)vm->sw.io_shared_page;
	if (buf == NULL) {
		/* Shared page not set up yet: report an invalid state. */
		return (uint32_t)-1;
	}

	return atomic_load32(&buf->req_queue[vhm_req_id].processed);
}
/**
 * @brief Set the state of one VHM request slot.
 *
 * Atomically stores 'state' into the 'processed' field of slot
 * 'vhm_req_id' in the VM's shared I/O request page.  Silently does
 * nothing when the VM has no shared I/O page registered.
 *
 * @param vm         Target VM context.
 * @param vhm_req_id VHM request ID (slot index, normally the vCPU id).
 * @param state      New state value (e.g. REQ_STATE_FREE).
 *
 * @return None
 */
void set_vhm_req_state(struct acrn_vm *vm, uint16_t vhm_req_id, uint32_t state)
{
	union vhm_request_buffer *buf;

	buf = (union vhm_request_buffer *)vm->sw.io_shared_page;
	if (buf != NULL) {
		atomic_store32(&buf->req_queue[vhm_req_id].processed, state);
	}
}

View File

@@ -305,6 +305,26 @@ void reset_vm_ioreqs(struct acrn_vm *vm);
*/
void handle_complete_ioreq(uint16_t pcpu_id);
/**
* @brief Get the state of VHM request
*
* @param vm Target VM context
* @param vhm_req_id VHM Request ID
*
* @return State of the IO Request.
*/
uint32_t get_vhm_req_state(struct acrn_vm *vm, uint16_t vhm_req_id);
/**
* @brief Set the state of VHM request
*
* @param vm Target VM context
* @param vhm_req_id VHM Request ID
* @param state State to be set
* @return None
*/
void set_vhm_req_state(struct acrn_vm *vm, uint16_t vhm_req_id, uint32_t state);
/**
* @}
*/