hv: Add IO request completion polling feature

This patch introduces a new mode of I/O request completion: polling mode.

The sketch of ioreq processing is now:
  A. UOS vcpu0 generates a PIO/MMIO access ->
   B. pcpu1 (vcpu0 of UOS) traps into the HV ->
    C. pcpu1 builds the ioreq, sends an IPI and enters idle ->
     D.1 pcpu0 (vcpu0 of SOS) responds to the IPI,
     D.2 pcpu0 handles the ioreq in the HV, kernel and DM,
     D.3 pcpu0 marks the ioreq as complete,
     D.4 pcpu0 issues a hypercall to enter the HV ->
       E.1 pcpu0 sends an IPI to wake pcpu1 up,
       E.2 UOS vcpu0 continues running

With this change, steps D.4 and E.1 are skipped. In step C, pcpu1 enters
a polling-ioreq idle state after sending out the IPI. This saves roughly
5000 CPU cycles.
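
As a condensed sketch of what the hunks below add (the wrapper function
and the spin loop here are illustrative; the real code hooks
handle_complete_ioreq() into default_idle() rather than spinning in a
private loop), the polling path on the UOS vcpu's pcpu looks roughly like:

  /*
   * Illustrative sketch only, using identifiers from the diff below
   * (struct acrn_vcpu, struct vhm_request, atomic_load32,
   * REQ_STATE_COMPLETE, emulate_io_post, cpu_do_idle).
   */
  static void poll_ioreq_completion(struct acrn_vcpu *vcpu)
  {
  	union vhm_request_buffer *req_buf = vcpu->vm->sw.io_shared_page;
  	struct vhm_request *vhm_req = &req_buf->req_queue[vcpu->vcpu_id];

  	/* Step C: the vcpu is already paused after sending the IPI to SOS. */
  	while (!(vhm_req->valid &&
  			atomic_load32(&vhm_req->processed) == REQ_STATE_COMPLETE)) {
  		cpu_do_idle();	/* keep checking from the idle loop */
  	}

  	/* Steps D.4/E.1 are skipped: the HV finishes the post work itself. */
  	emulate_io_post(vcpu);
  }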

In polling mode, we poll in the idle loop instead of pausing the CPU
the whole time, so it consumes more power. A better way would be the
MONITOR/MWAIT instructions, which can put the CPU into a sleep state
while monitoring a memory address. Unfortunately, APL has a bug with
MONITOR. We can gather all ioreq states into one monitorable memory
region and take advantage of MONITOR/MWAIT on future platforms.
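
On such a platform, the wait could look roughly like the hypothetical
helper below (not part of this patch; it assumes MONITOR/MWAIT are
usable and that the ioreq state lives in a single monitorable word):

  /*
   * Hypothetical sketch only: park the pcpu on a monitored status word
   * until SOS marks the ioreq complete. Requires a platform where
   * MONITOR works correctly, which APL does not per the note above.
   */
  static void mwait_for_ioreq_complete(volatile uint32_t *state)
  {
  	while (*state != REQ_STATE_COMPLETE) {
  		/* Arm the monitor on the cache line holding *state. */
  		asm volatile("monitor" : : "a"(state), "c"(0UL), "d"(0UL));
  		/* Re-check to close the race between the load and MONITOR. */
  		if (*state == REQ_STATE_COMPLETE) {
  			break;
  		}
  		/* Sleep until that cache line is written or an interrupt fires. */
  		asm volatile("mwait" : : "a"(0UL), "c"(0UL));
  	}
  }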

The choice between polling and notification is per VM (each VM carries
its own is_completion_polling flag), so VMs can be configured in
different modes. By default, I/O request completion uses notification
mode for all VMs; the mode is switched via Kconfig.

Tracked-On: #1821
Signed-off-by: Shuo Liu <shuo.a.liu@intel.com>
Reviewed-by: Eddie Dong <eddie.dong@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>

8 changed files with 103 additions and 4 deletions

@@ -173,11 +173,18 @@ struct vhm_request {
 	uint32_t type;
 
 	/**
-	 * @brief Reserved.
+	 * @brief Hypervisor will poll completion if set.
 	 *
 	 * Byte offset: 4.
 	 */
-	uint32_t reserved0[15];
+	uint32_t completion_polling;
+
+	/**
+	 * @brief Reserved.
+	 *
+	 * Byte offset: 8.
+	 */
+	uint32_t reserved0[14];
 
 	/**
 	 * @brief Details about this request.

@@ -40,6 +40,28 @@ config PARTITION_MODE
 endchoice
 
+choice
+	prompt "I/O emulation completion mode"
+	default IOREQ_NOTIFICATION
+	help
+	  Select the mode of I/O emulation completion
+
+config IOREQ_NOTIFICATION
+	bool "Notification mode"
+	help
+	  When I/O request is completed, SOS will mark the completion status and
+	  notify hypervisor via hypercall. Hypervisor will finish the post work
+	  when notification is received.
+
+config IOREQ_POLLING
+	bool "Polling mode"
+	help
+	  When I/O request is completed, SOS will only mark completion status
+	  without notifying hypervisor. Hypervisor will poll the completion
+	  status and finish the post work.
+
+endchoice
+
 config PLATFORM
 	string
 	default "uefi" if PLATFORM_UEFI

@@ -162,6 +162,10 @@ int create_vm(struct vm_description *vm_desc, struct acrn_vm **rtn_vm)
 	/* Populate return VM handle */
 	*rtn_vm = vm;
 	vm->sw.io_shared_page = NULL;
+#ifdef CONFIG_IOREQ_POLLING
+	/* Now, enable IO completion polling mode for all VMs with CONFIG_IOREQ_POLLING. */
+	vm->sw.is_completion_polling = true;
+#endif
 
 	status = set_vcpuid_entries(vm);
 	if (status != 0) {

@@ -74,6 +74,50 @@ void reset_vm_ioreqs(struct acrn_vm *vm)
 	}
 }
 
+static bool has_complete_ioreq(struct acrn_vcpu *vcpu)
+{
+	union vhm_request_buffer *req_buf = NULL;
+	struct vhm_request *vhm_req;
+	struct acrn_vm *vm;
+
+	vm = vcpu->vm;
+	req_buf = (union vhm_request_buffer *)vm->sw.io_shared_page;
+	if (req_buf != NULL) {
+		vhm_req = &req_buf->req_queue[vcpu->vcpu_id];
+		if (vhm_req->valid &&
+			atomic_load32(&vhm_req->processed)
+				== REQ_STATE_COMPLETE) {
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/**
+ * @brief Handle completed ioreq if any one pending
+ *
+ * @param pcpu_id The physical cpu id of vcpu whose IO request to be checked
+ *
+ * @return N/A
+ */
+void handle_complete_ioreq(uint16_t pcpu_id)
+{
+	struct acrn_vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
+	struct acrn_vm *vm;
+
+	if (vcpu != NULL) {
+		vm = vcpu->vm;
+
+		if (vm->sw.is_completion_polling) {
+			if (has_complete_ioreq(vcpu)) {
+				/* we have completed ioreq pending */
+				emulate_io_post(vcpu);
+			}
+		}
+	}
+}
+
 /**
  * @brief Deliver \p io_req to SOS and suspend \p vcpu till its completion
  *
@@ -103,6 +147,9 @@ int32_t acrn_insert_request_wait(struct acrn_vcpu *vcpu, const struct io_request
 	vhm_req->type = io_req->type;
 	(void)memcpy_s(&vhm_req->reqs, sizeof(union vhm_io_request),
 		&io_req->reqs, sizeof(union vhm_io_request));
+	if (vcpu->vm->sw.is_completion_polling) {
+		vhm_req->completion_polling = 1U;
+	}
 
 	/* pause vcpu, wait for VHM to handle the MMIO request.
 	 * TODO: when pause_vcpu changed to switch vcpu out directlly, we

@@ -178,6 +178,7 @@ void default_idle(void)
 			cpu_dead(pcpu_id);
 		} else {
 			CPU_IRQ_ENABLE();
+			handle_complete_ioreq(pcpu_id);
 			cpu_do_idle();
 			CPU_IRQ_DISABLE();
 		}

@@ -55,6 +55,8 @@ struct vm_sw_info {
 	struct sw_linux linux_info;
 	/* HVA to IO shared page */
 	void *io_shared_page;
+	/* If enable IO completion polling mode */
+	bool is_completion_polling;
 };
 
 struct vm_pm_info {

@@ -296,6 +296,15 @@ int32_t acrn_insert_request_wait(struct acrn_vcpu *vcpu, const struct io_request
  */
 void reset_vm_ioreqs(struct acrn_vm *vm);
 
+/**
+ * @brief Handle completed ioreq if any one pending
+ *
+ * @param pcpu_id The physical cpu id of vcpu whose IO request to be checked
+ *
+ * @return N/A
+ */
+void handle_complete_ioreq(uint16_t pcpu_id);
+
 /**
  * @}
  */

@@ -268,11 +268,18 @@ struct vhm_request {
 	uint32_t type;
 
 	/**
-	 * Reserved.
+	 * Hypervisor will poll completion if set.
 	 *
 	 * Byte offset: 4.
 	 */
-	uint32_t reserved0[15];
+	uint32_t completion_polling;
+
+	/**
+	 * Reserved.
+	 *
+	 * Byte offset: 8.
+	 */
+	uint32_t reserved0[14];
 
 	/**
 	 * Details about this request. For REQ_PORTIO, this has type