mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-09-08 20:29:40 +00:00
dm: block_if: add the io_uring support
io_uring is a high-performance asynchronous I/O framework, primarily designed to improve the efficiency of input and output (I/O) operations in user-space applications. This patch enables io_uring in block_if module. It utilizes the interfaces provided by the user-space library `liburing` to interact with io_uring in kernel-space. To build the acrn-dm with io_uring support, `liburing-dev` package needs to be installed. For example, it can be installed like below in Ubuntu 22.04. sudo apt install liburing-dev In order to support both the thread pool mechanism and the io_uring mechanism, an acrn-dm option `aio` is introduced. By default, thread pool mechanism is selected. - Example to use io_uring: `add_virtual_device 9 virtio-blk iothread,mq=2,/dev/nvme1n1,writeback,aio=io_uring` - Example to use thread pool: `add_virtual_device 9 virtio-blk iothread,mq=2,/dev/nvme1n1,writeback,aio=threads` - Example to use thread pool (by default): `add_virtual_device 9 virtio-blk iothread,mq=2,/dev/nvme1n1,writeback` v2 -> v3: * Update iothread_handler - Use the unified eventfd interfaces to read the counter value of the ioeventfd. - Remove the while loop to read the ioeventfd. It is not necessary because one read would reset the counter value to 0. * Update iou_submit_sqe to return an error code The caller of iou_submit_sqe shall check the return value. If there is NO available submission queue entry in the submission queue, need to break the while loop. Request can only be submitted when SQE is available. v1 -> v2: * move the logic of reading out ioeventfd from iothread.c to virtio.c, because it is specific to the virtqueue handling. Tracked-On: #8612 Signed-off-by: Shiqing Gao <shiqing.gao@intel.com> Acked-by: Wang, Yu1 <yu1.wang@intel.com>
This commit is contained in:
committed by
acrnsi-robot
parent
edb392e7ed
commit
fed8ce513c
@@ -21,7 +21,7 @@
|
||||
|
||||
|
||||
#define MEVENT_MAX 64
|
||||
#define MAX_EVENT_NUM 64
|
||||
|
||||
struct iothread_ctx {
|
||||
pthread_t tid;
|
||||
int epfd;
|
||||
@@ -35,25 +35,22 @@ io_thread(void *arg)
|
||||
{
|
||||
struct epoll_event eventlist[MEVENT_MAX];
|
||||
struct iothread_mevent *aevp;
|
||||
int i, n, status;
|
||||
char buf[MAX_EVENT_NUM];
|
||||
int i, n;
|
||||
|
||||
while(ioctx.started) {
|
||||
n = epoll_wait(ioctx.epfd, eventlist, MEVENT_MAX, -1);
|
||||
if (n < 0) {
|
||||
if (errno == EINTR)
|
||||
pr_info("%s: exit from epoll_wait\n", __func__);
|
||||
else
|
||||
if (errno == EINTR) {
|
||||
/* EINTR may happen when io_uring fd is monitored, it is harmless. */
|
||||
continue;
|
||||
} else {
|
||||
pr_err("%s: return from epoll wait with errno %d\r\n", __func__, errno);
|
||||
break;
|
||||
break;
|
||||
}
|
||||
}
|
||||
for (i = 0; i < n; i++) {
|
||||
aevp = eventlist[i].data.ptr;
|
||||
if (aevp && aevp->run) {
|
||||
/* Mitigate the epoll_wait repeat cycles by reading out as many events as possible. */
|
||||
do {
|
||||
status = read(aevp->fd, buf, sizeof(buf));
|
||||
} while (status == MAX_EVENT_NUM);
|
||||
(*aevp->run)(aevp->arg);
|
||||
}
|
||||
}
|
||||
|
Reference in New Issue
Block a user