io_emul: reshuffle io emulation path

This is the follow-up patch to the removal of pending_pre_work; it makes sure
all I/O emulation is completed on the vCPU's own pCPU.

For an HV-emulated request, the flow is:
	hv_emulate_pio->emulate_pio_complete
	hv_emulate_mmio->emulate_mmio_complete

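Condensed from the emulate_io() hunk below, the HV-handled path now looks
roughly like this (a sketch; error paths and partition mode omitted):

	switch (io_req->type) {
	case REQ_PORTIO:
		status = hv_emulate_pio(vcpu, io_req);
		if (status == 0) {
			/* handled entirely inside the HV: complete it right here,
			 * on this vCPU's own pCPU */
			emulate_pio_complete(vcpu, io_req);
		}
		break;
	case REQ_MMIO:
	case REQ_WP:
		status = hv_emulate_mmio(vcpu, io_req);
		if (status == 0) {
			emulate_mmio_complete(vcpu, io_req);
		}
		break;
	default:
		/* unknown I/O request type */
		status = -EINVAL;
		break;
	}
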
For a DM-emulated request, the flow is:
	acrn_insert_request->dm_emulate_io_complete
Inside acrn_insert_request, the vCPU may be scheduled out and later resumed by
hcall_notify_ioreq_finish, or it busy-waits for ioreq completion if polling
mode is enabled.
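
Condensed from the emulate_io() and acrn_insert_request() hunks below, the
DM-handled path is roughly (a sketch; error handling omitted):

	status = acrn_insert_request(vcpu, io_req);
	if (status == 0) {
		/* the DM has completed the request; finish it here,
		 * still on this vCPU's own pCPU */
		dm_emulate_io_complete(vcpu);
	}

	/* inside acrn_insert_request(), once the request is posted to VHM: */
	if (is_polling) {
		while (!need_reschedule(vcpu->pcpu_id)) {
			if (has_complete_ioreq(vcpu)) {
				break;	/* completed ioreq pending */
			}
			asm_pause();
		}
	} else if (need_reschedule(vcpu->pcpu_id)) {
		schedule();	/* resumed later by hcall_notify_ioreq_finish() */
	}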

Tracked-On: #2394
Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Xu, Anthony <anthony.xu@intel.com>
commit 15030f6f2c (parent fb41ea5cfb)
Jason Chen CJ, 2019-01-22 11:35:15 +08:00, committed by wenlingz
6 changed files with 34 additions and 112 deletions

Changed file 1 of 6:

@@ -324,19 +324,7 @@ I/O Emulation
The following APIs are provided for I/O emulation at runtime:
.. doxygenfunction:: emulate_io
:project: Project ACRN
.. doxygenfunction:: acrn_insert_request_wait
:project: Project ACRN
.. doxygenfunction:: emulate_io_post
:project: Project ACRN
.. doxygenfunction:: emulate_mmio_post
:project: Project ACRN
.. doxygenfunction:: dm_emulate_mmio_post
.. doxygenfunction:: acrn_insert_request
:project: Project ACRN
.. doxygenfunction:: pio_instr_vmexit_handler

Changed file 2 of 6:

@@ -37,7 +37,7 @@ static void complete_ioreq(struct acrn_vcpu *vcpu, struct io_request *io_req)
}
/**
* @brief Post-work for port I/O emulation
* @brief General complete-work for port I/O emulation
*
* @pre io_req->type == REQ_PORTIO
*
@@ -46,7 +46,7 @@ static void complete_ioreq(struct acrn_vcpu *vcpu, struct io_request *io_req)
* request having transferred to the COMPLETE state.
*/
static void
emulate_pio_post(struct acrn_vcpu *vcpu, const struct io_request *io_req)
emulate_pio_complete(struct acrn_vcpu *vcpu, const struct io_request *io_req)
{
const struct pio_request *pio_req = &io_req->reqs.pio;
uint64_t mask = 0xFFFFFFFFUL >> (32UL - 8UL * pio_req->size);
@@ -61,24 +61,24 @@ emulate_pio_post(struct acrn_vcpu *vcpu, const struct io_request *io_req)
}
/**
* @brief Post-work of VHM requests for port I/O emulation
* @brief Complete-work of VHM requests for port I/O emulation
*
* @pre vcpu->req.type == REQ_PORTIO
*
* @remark This function must be called after the VHM request corresponding to
* \p vcpu being transferred to the COMPLETE state.
*/
static void dm_emulate_pio_post(struct acrn_vcpu *vcpu)
static void dm_emulate_pio_complete(struct acrn_vcpu *vcpu)
{
struct io_request *io_req = &vcpu->req;
complete_ioreq(vcpu, io_req);
emulate_pio_post(vcpu, io_req);
emulate_pio_complete(vcpu, io_req);
}
/**
* @brief General post-work for MMIO emulation
* @brief General complete-work for MMIO emulation
*
* @param vcpu The virtual CPU that triggers the MMIO access
* @param io_req The I/O request holding the details of the MMIO access
@@ -89,7 +89,7 @@ static void dm_emulate_pio_post(struct acrn_vcpu *vcpu)
* either a previous call to emulate_io() returning 0 or the corresponding VHM
* request transferring to the COMPLETE state.
*/
void emulate_mmio_post(const struct acrn_vcpu *vcpu, const struct io_request *io_req)
static void emulate_mmio_complete(const struct acrn_vcpu *vcpu, const struct io_request *io_req)
{
const struct mmio_request *mmio_req = &io_req->reqs.mmio;
@@ -100,7 +100,7 @@ void emulate_mmio_post(const struct acrn_vcpu *vcpu, const struct io_request *io
}
/**
* @brief Post-work of VHM requests for MMIO emulation
* @brief Complete-work of VHM requests for MMIO emulation
*
* @param vcpu The virtual CPU that triggers the MMIO access
*
@@ -109,13 +109,13 @@ void emulate_mmio_post(const struct acrn_vcpu *vcpu, const struct io_request *io
* @remark This function must be called after the VHM request corresponding to
* \p vcpu being transferred to the COMPLETE state.
*/
void dm_emulate_mmio_post(struct acrn_vcpu *vcpu)
static void dm_emulate_mmio_complete(struct acrn_vcpu *vcpu)
{
struct io_request *io_req = &vcpu->req;
complete_ioreq(vcpu, io_req);
emulate_mmio_post(vcpu, io_req);
emulate_mmio_complete(vcpu, io_req);
}
#ifdef CONFIG_PARTITION_MODE
@@ -130,11 +130,11 @@ static void io_instr_dest_handler(struct io_request *io_req)
#endif
/**
* @brief General post-work for all kinds of VHM requests for I/O emulation
* @brief General complete-work for all kinds of VHM requests for I/O emulation
*
* @param vcpu The virtual CPU that triggers the MMIO access
*/
void emulate_io_post(struct acrn_vcpu *vcpu)
static void dm_emulate_io_complete(struct acrn_vcpu *vcpu)
{
if (get_vhm_req_state(vcpu->vm, vcpu->vcpu_id) == REQ_STATE_COMPLETE) {
/*
@@ -146,7 +146,7 @@ void emulate_io_post(struct acrn_vcpu *vcpu)
} else {
switch (vcpu->req.type) {
case REQ_MMIO:
dm_emulate_mmio_post(vcpu);
dm_emulate_mmio_complete(vcpu);
break;
case REQ_PORTIO:
@@ -158,7 +158,7 @@ void emulate_io_post(struct acrn_vcpu *vcpu)
* REQ_PORTIO & REQ_PCICFG requests are exactly the same and
* post-work is mainly interested in the read value.
*/
dm_emulate_pio_post(vcpu);
dm_emulate_pio_complete(vcpu);
break;
default:
@@ -170,9 +170,6 @@ void emulate_io_post(struct acrn_vcpu *vcpu)
break;
}
if (!vcpu->vm->sw.is_completion_polling) {
resume_vcpu(vcpu);
}
}
}
}
@@ -292,7 +289,7 @@ hv_emulate_mmio(struct acrn_vcpu *vcpu, struct io_request *io_req)
* @retval -EINVAL \p io_req has an invalid type.
* @retval <0 on other errors during emulation.
*/
int32_t
static int32_t
emulate_io(struct acrn_vcpu *vcpu, struct io_request *io_req)
{
int32_t status;
@@ -300,10 +297,16 @@ emulate_io(struct acrn_vcpu *vcpu, struct io_request *io_req)
switch (io_req->type) {
case REQ_PORTIO:
status = hv_emulate_pio(vcpu, io_req);
if (status == 0) {
emulate_pio_complete(vcpu, io_req);
}
break;
case REQ_MMIO:
case REQ_WP:
status = hv_emulate_mmio(vcpu, io_req);
if (status == 0) {
emulate_mmio_complete(vcpu, io_req);
}
break;
default:
/* Unknown I/O request type */
@@ -326,9 +329,10 @@ emulate_io(struct acrn_vcpu *vcpu, struct io_request *io_req)
*
* ACRN insert request to VHM and inject upcall.
*/
status = acrn_insert_request_wait(vcpu, io_req);
if (status != 0) {
status = acrn_insert_request(vcpu, io_req);
if (status == 0) {
dm_emulate_io_complete(vcpu);
} else {
/* here for both IO & MMIO, the direction, address,
* size definition is same
*/
@@ -337,8 +341,6 @@ emulate_io(struct acrn_vcpu *vcpu, struct io_request *io_req)
"addr = 0x%llx, size=%lu", __func__,
pio_req->direction, io_req->type,
pio_req->address, pio_req->size);
} else {
status = IOREQ_PENDING;
}
#endif
}
@@ -381,14 +383,6 @@ int32_t pio_instr_vmexit_handler(struct acrn_vcpu *vcpu)
status = emulate_io(vcpu, io_req);
if (status == 0) {
emulate_pio_post(vcpu, io_req);
} else if (status == IOREQ_PENDING) {
status = 0;
} else {
/* do nothing */
}
return status;
}
@@ -452,13 +446,6 @@ int32_t ept_violation_vmexit_handler(struct acrn_vcpu *vcpu)
if (ret > 0) {
status = emulate_io(vcpu, io_req);
if (status == 0) {
emulate_mmio_post(vcpu, io_req);
} else {
if (status == IOREQ_PENDING) {
status = 0;
}
}
}
} else {
if (ret == -EFAULT) {

Changed file 3 of 6:

@@ -489,7 +489,9 @@ int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id)
ret = -EINVAL;
} else {
vcpu = vcpu_from_vid(target_vm, vcpu_id);
emulate_io_post(vcpu);
if (!vcpu->vm->sw.is_completion_polling) {
resume_vcpu(vcpu);
}
ret = 0;
}
}

Changed file 4 of 6:

@@ -87,12 +87,12 @@ static inline bool has_complete_ioreq(const struct acrn_vcpu *vcpu)
*
* @pre vcpu != NULL && io_req != NULL
*/
int32_t acrn_insert_request_wait(struct acrn_vcpu *vcpu, const struct io_request *io_req)
int32_t acrn_insert_request(struct acrn_vcpu *vcpu, const struct io_request *io_req)
{
union vhm_request_buffer *req_buf = NULL;
struct vhm_request *vhm_req;
bool is_polling = false;
int32_t ret;
int32_t ret = 0;
uint16_t cur;
if (vcpu->vm->sw.io_shared_page != NULL) {
@@ -149,19 +149,18 @@ int32_t acrn_insert_request_wait(struct acrn_vcpu *vcpu, const struct io_request
while (!need_reschedule(vcpu->pcpu_id)) {
if (has_complete_ioreq(vcpu)) {
/* we have completed ioreq pending */
emulate_io_post(vcpu);
break;
}
asm_pause();
}
} else if (need_reschedule(vcpu->pcpu_id)) {
schedule();
}
ret = 0;
} else {
ret = -EINVAL;
}
} else {
ret = -EINVAL;
}
return ret;
}

Changed file 5 of 6:

@@ -22,56 +22,6 @@
#define RTC_PIO_IDX (PM1B_CNT_PIO_IDX + 1U)
#define EMUL_PIO_IDX_MAX (RTC_PIO_IDX + 1U)
/**
* @brief General post-work for MMIO emulation
*
* @param vcpu The virtual CPU that triggers the MMIO access
* @param io_req The I/O request holding the details of the MMIO access
*
* @pre io_req->type == REQ_MMIO
*
* @remark This function must be called when \p io_req is completed, after
* either a previous call to emulate_io() returning 0 or the corresponding VHM
* request transferring to the COMPLETE state.
*/
void emulate_mmio_post(const struct acrn_vcpu *vcpu, const struct io_request *io_req);
/**
* @brief Post-work of VHM requests for MMIO emulation
*
* @param vcpu The virtual CPU that triggers the MMIO access
*
* @pre vcpu->req.type == REQ_MMIO
*
* @remark This function must be called after the VHM request corresponding to
* \p vcpu being transferred to the COMPLETE state.
*/
void dm_emulate_mmio_post(struct acrn_vcpu *vcpu);
/**
* @brief General post-work for all kinds of VHM requests for I/O emulation
*
* @param vcpu The virtual CPU that triggers the MMIO access
*/
void emulate_io_post(struct acrn_vcpu *vcpu);
/**
* @brief Emulate \p io_req for \p vcpu
*
* Handle an I/O request by either invoking a hypervisor-internal handler or
* deliver to VHM.
*
* @param vcpu The virtual CPU that triggers the MMIO access
* @param io_req The I/O request holding the details of the MMIO access
*
* @retval 0 Successfully emulated by registered handlers.
* @retval IOREQ_PENDING The I/O request is delivered to VHM.
* @retval -EIO The request spans multiple devices and cannot be emulated.
* @retval -EINVAL \p io_req has an invalid type.
* @retval <0 on other errors during emulation.
*/
int32_t emulate_io(struct acrn_vcpu *vcpu, struct io_request *io_req);
/**
* @brief The handler of VM exits on I/O instructions
*

Changed file 6 of 6:

@@ -17,10 +17,6 @@
* @{
*/
/* The return value of emulate_io() indicating the I/O request is delivered to
* VHM but not finished yet. */
#define IOREQ_PENDING 1
/**
* @brief Internal representation of a I/O request.
*/
@@ -177,7 +173,7 @@ struct mem_io_node {
*
* @pre vcpu != NULL && io_req != NULL
*/
int32_t acrn_insert_request_wait(struct acrn_vcpu *vcpu, const struct io_request *io_req);
int32_t acrn_insert_request(struct acrn_vcpu *vcpu, const struct io_request *io_req);
/**
* @brief Reset all IO requests status of the VM