diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c
index c0f34d9b8..266e1543b 100644
--- a/hypervisor/arch/x86/guest/vcpu.c
+++ b/hypervisor/arch/x86/guest/vcpu.c
@@ -799,7 +799,7 @@ void launch_vcpu(struct acrn_vcpu *vcpu)
 /* help function for vcpu create */
 int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
 {
-	int32_t ret;
+	int32_t ret, i;
 	struct acrn_vcpu *vcpu = NULL;
 	char thread_name[16];
 
@@ -815,6 +815,9 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
 		vcpu->thread_obj.switch_out = context_switch_out;
 		vcpu->thread_obj.switch_in = context_switch_in;
 		init_thread_data(&vcpu->thread_obj);
+		for (i = 0; i < VCPU_EVENT_NUM; i++) {
+			init_event(&vcpu->events[i]);
+		}
 	}
 
 	return ret;
diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c
index c11404080..394f405f9 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -553,12 +553,10 @@ int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id)
 				__func__, vcpu_id, target_vm->vm_id);
 		} else {
 			vcpu = vcpu_from_vid(target_vm, vcpu_id);
-			if (vcpu->state == VCPU_PAUSED) {
-				if (!vcpu->vm->sw.is_completion_polling) {
-					resume_vcpu(vcpu);
-				}
-				ret = 0;
+			if (!vcpu->vm->sw.is_completion_polling) {
+				signal_event(&vcpu->events[VCPU_EVENT_IOREQ]);
 			}
+			ret = 0;
 		}
 	}
 
diff --git a/hypervisor/dm/io_req.c b/hypervisor/dm/io_req.c
index d0ce332e3..e113c0c84 100644
--- a/hypervisor/dm/io_req.c
+++ b/hypervisor/dm/io_req.c
@@ -103,14 +103,6 @@ int32_t acrn_insert_request(struct acrn_vcpu *vcpu, const struct io_request *io_
 	}
 	clac();
 
-	/* pause vcpu in notification mode , wait for VHM to handle the MMIO request.
-	 * TODO: when pause_vcpu changed to switch vcpu out directlly, we
-	 * should fix the race issue between req.processed update and vcpu pause
-	 */
-	if (!is_polling) {
-		pause_vcpu(vcpu, VCPU_PAUSED);
-	}
-
 	/* Before updating the vhm_req state, enforce all fill vhm_req operations done */
 	cpu_write_memory_barrier();
 
@@ -136,10 +128,8 @@ int32_t acrn_insert_request(struct acrn_vcpu *vcpu, const struct io_request *io_
 					schedule();
 				}
 			}
-		} else if (need_reschedule(pcpuid_from_vcpu(vcpu))) {
-			schedule();
 		} else {
-			ret = -EINVAL;
+			wait_event(&vcpu->events[VCPU_EVENT_IOREQ]);
 		}
 	} else {
 		ret = -EINVAL;
diff --git a/hypervisor/include/arch/x86/guest/vcpu.h b/hypervisor/include/arch/x86/guest/vcpu.h
index bfee6e4b6..9fd906910 100644
--- a/hypervisor/include/arch/x86/guest/vcpu.h
+++ b/hypervisor/include/arch/x86/guest/vcpu.h
@@ -22,6 +22,7 @@
 #include 
 #include 
 #include 
+#include <event.h>
 #include 
 #include 
 #include 
@@ -146,6 +147,9 @@ enum vm_cpu_mode {
 	CPU_MODE_64BIT,		/* IA-32E mode (CS.L = 1) */
 };
 
+#define VCPU_EVENT_IOREQ		0
+#define VCPU_EVENT_VIRTUAL_INTERRUPT	1
+#define VCPU_EVENT_NUM			2
 /* 2 worlds: 0 for Normal World, 1 for Secure World */
 #define NR_WORLD	2
 
@@ -260,6 +264,8 @@ struct acrn_vcpu {
 
 	uint64_t reg_cached;
 	uint64_t reg_updated;
+
+	struct sched_event events[VCPU_EVENT_NUM];
 } __aligned(PAGE_SIZE);
 
 struct vcpu_dump {
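
Note on the new synchronization model (illustrative, not part of the patch): the diff drops pause_vcpu()/resume_vcpu() from the I/O request path and instead parks the requesting vCPU thread on a per-vCPU event. In notification mode acrn_insert_request() now blocks on wait_event(&vcpu->events[VCPU_EVENT_IOREQ]), and hcall_notify_ioreq_finish() wakes it with signal_event() once the service VM reports completion. The sketch below is a minimal user-space analogue of that hand-off, assuming only the wait/signal semantics visible in the patch; it uses a pthread mutex, condition variable, and a latched "set" flag in place of the hypervisor's sched_event, and every demo_* name is hypothetical.

/*
 * Illustrative sketch only (not ACRN code): models the wait_event/
 * signal_event hand-off used by this patch for I/O request completion.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_event {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool set;
};

static void demo_init_event(struct demo_event *e)
{
	pthread_mutex_init(&e->lock, NULL);
	pthread_cond_init(&e->cond, NULL);
	e->set = false;
}

/* Block until the event is signaled, then consume it (one-shot). */
static void demo_wait_event(struct demo_event *e)
{
	pthread_mutex_lock(&e->lock);
	while (!e->set) {
		pthread_cond_wait(&e->cond, &e->lock);
	}
	e->set = false;
	pthread_mutex_unlock(&e->lock);
}

/* Mark the event pending and wake a waiter, if any. */
static void demo_signal_event(struct demo_event *e)
{
	pthread_mutex_lock(&e->lock);
	e->set = true;
	pthread_cond_signal(&e->cond);
	pthread_mutex_unlock(&e->lock);
}

static struct demo_event ioreq_done;

/* Stands in for the vCPU thread in acrn_insert_request(): post the
 * request, then sleep until completion is signaled. */
static void *vcpu_thread(void *arg)
{
	(void)arg;
	printf("vcpu: I/O request posted, waiting for completion\n");
	demo_wait_event(&ioreq_done);
	printf("vcpu: completion received, resuming guest\n");
	return NULL;
}

/* Stands in for hcall_notify_ioreq_finish(): wake the waiting vCPU. */
int main(void)
{
	pthread_t tid;

	demo_init_event(&ioreq_done);
	pthread_create(&tid, NULL, vcpu_thread, NULL);
	demo_signal_event(&ioreq_done);
	pthread_join(&tid, NULL);
	return 0;
}

Because the flag is latched under the lock before the waiter checks it, a signal that arrives before the wait (the completion racing ahead of the vCPU going to sleep) is not lost; that is the property the patch relies on after removing the pause/resume window and its associated TODO about the req.processed race.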