hv: decouple IO completion polling from idle thread

IO completion polling accesses the vcpu and vm structures. If the
polling is done in the idle thread, there can be races between VM
destroy and the idle thread, because they run on different cores.
Following Fengwei's suggestion, decouple the polling from the idle
thread and do it in the vcpu thread instead, so the idle thread is
guaranteed to stay truly idle.

Tracked-On: #1821
Signed-off-by: Shuo A Liu <shuo.a.liu@intel.com>
Reviewed-by: Eddie Dong <eddie.dong@intel.com>
Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
Author: Shuo A Liu
Date: 2018-12-21 10:23:44 +08:00
Committed by: wenlingz
Parent: a0154223f6
Commit: 952943c3ea
4 changed files with 39 additions and 37 deletions
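
For orientation, here is a minimal sketch of the new polling flow, condensed from the io_req.c hunk below. The wrapper name poll_ioreq_completion() is illustrative only; the actual change open-codes this loop inside acrn_insert_request_wait().

/* Hypothetical wrapper around the loop added below: in completion polling
 * mode the vcpu thread itself spins for the result instead of relying on
 * the idle thread, so the idle thread can stay truly idle.
 */
static void poll_ioreq_completion(struct acrn_vcpu *vcpu)
{
	while (!need_reschedule(vcpu->pcpu_id)) {
		if (has_complete_ioreq(vcpu)) {
			/* finish the MMIO/PIO post work in vcpu context */
			emulate_io_post(vcpu);
			break;
		}
		asm_pause();	/* CPU pause hint while busy-waiting */
	}
}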

@@ -146,7 +146,18 @@ void emulate_io_post(struct acrn_vcpu *vcpu)
} else {
switch (vcpu->req.type) {
case REQ_MMIO:
request_vcpu_pre_work(vcpu, ACRN_VCPU_MMIO_COMPLETE);
/*
* In IO completion polling mode, the post work of IO emulation runs on
* the vcpu's own pcpu, so the MMIO post work can be done directly here.
* In notification mode, this code runs on the SOS pcpu, so we call
* request_vcpu_pre_work and let the vcpu's own pcpu get scheduled to
* finish the MMIO post work.
*/
if (!vcpu->vm->sw.is_completion_polling) {
request_vcpu_pre_work(vcpu, ACRN_VCPU_MMIO_COMPLETE);
} else {
dm_emulate_mmio_post(vcpu);
}
break;
case REQ_PORTIO:
@@ -170,7 +181,9 @@ void emulate_io_post(struct acrn_vcpu *vcpu)
break;
}
resume_vcpu(vcpu);
if (!vcpu->vm->sw.is_completion_polling) {
resume_vcpu(vcpu);
}
}
}
}

@@ -101,7 +101,6 @@ void default_idle(__unused struct sched_object *obj)
cpu_dead();
} else {
CPU_IRQ_ENABLE();
handle_complete_ioreq(pcpu_id);
cpu_do_idle();
CPU_IRQ_DISABLE();
}

@@ -78,29 +78,6 @@ static inline bool has_complete_ioreq(const struct acrn_vcpu *vcpu)
return (get_vhm_req_state(vcpu->vm, vcpu->vcpu_id) == REQ_STATE_COMPLETE);
}
/**
* @brief Handle completed ioreq if any one pending
*
* @param pcpu_id The physical cpu id of vcpu whose IO request to be checked
*
* @return None
*/
void handle_complete_ioreq(uint16_t pcpu_id)
{
struct acrn_vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
struct acrn_vm *vm;
if (vcpu != NULL) {
vm = vcpu->vm;
if (vm->sw.is_completion_polling) {
if (has_complete_ioreq(vcpu)) {
/* we have completed ioreq pending */
emulate_io_post(vcpu);
}
}
}
}
/**
* @brief Deliver \p io_req to SOS and suspend \p vcpu till its completion
*
@@ -113,6 +90,7 @@ int32_t acrn_insert_request_wait(struct acrn_vcpu *vcpu, const struct io_request
{
union vhm_request_buffer *req_buf = NULL;
struct vhm_request *vhm_req;
bool is_polling = false;
uint16_t cur;
if (vcpu->vm->sw.io_shared_page == NULL) {
@@ -133,14 +111,17 @@ int32_t acrn_insert_request_wait(struct acrn_vcpu *vcpu, const struct io_request
&io_req->reqs, sizeof(union vhm_io_request));
if (vcpu->vm->sw.is_completion_polling) {
vhm_req->completion_polling = 1U;
is_polling = true;
}
clac();
/* pause vcpu, wait for VHM to handle the MMIO request.
/* Pause the vcpu in notification mode and wait for VHM to handle the MMIO request.
* TODO: when pause_vcpu is changed to switch the vcpu out directly, we
* should fix the race issue between req.processed update and vcpu pause
*/
pause_vcpu(vcpu, VCPU_PAUSED);
if (!is_polling) {
pause_vcpu(vcpu, VCPU_PAUSED);
}
/* Must clear the signal before we mark req as pending
* Once we mark it pending, VHM may process req and signal us
@@ -158,6 +139,24 @@ int32_t acrn_insert_request_wait(struct acrn_vcpu *vcpu, const struct io_request
/* signal VHM */
fire_vhm_interrupt();
/* Poll for completion of the request in polling mode */
if (is_polling) {
/*
* Currently, the only case that schedules this vcpu out of the IO
* completion polling loop is pause_vcpu to VCPU_ZOMBIE. In that case
* we cannot come back to polling again, which is fine because no IO
* completion needs to be handled in zombie status.
*/
while (!need_reschedule(vcpu->pcpu_id)) {
if (has_complete_ioreq(vcpu)) {
/* we have completed ioreq pending */
emulate_io_post(vcpu);
break;
}
asm_pause();
}
}
return 0;
}

@@ -296,15 +296,6 @@ int32_t acrn_insert_request_wait(struct acrn_vcpu *vcpu, const struct io_request
*/
void reset_vm_ioreqs(struct acrn_vm *vm);
/**
* @brief Handle completed ioreq if any one pending
*
* @param pcpu_id The physical cpu id of vcpu whose IO request to be checked
*
* @return None
*/
void handle_complete_ioreq(uint16_t pcpu_id);
/**
* @brief Get the state of VHM request
*