hv: vcpu: wait and signal vcpu event support

Introduce two kinds of events for each vcpu,
  VCPU_EVENT_IOREQ: for vcpu waiting for IO request completion
  VCPU_EVENT_VIRTUAL_INTERRUPT: for vcpu waiting for virtual interrupt events
A vcpu can wait for such events, and resumes running when the
event gets signaled.

This patch also changes IO request waiting/notifying to use this mechanism.

Tracked-On: #4329
Signed-off-by: Shuo A Liu <shuo.a.liu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Shuo A Liu 2019-12-31 13:00:13 +08:00 committed by wenlingz
parent 9927b021c5
commit 8e74c2cfd5
4 changed files with 14 additions and 17 deletions

View File

@ -799,7 +799,7 @@ void launch_vcpu(struct acrn_vcpu *vcpu)
/* help function for vcpu create */
int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
{
int32_t ret;
int32_t ret, i;
struct acrn_vcpu *vcpu = NULL;
char thread_name[16];
@ -815,6 +815,9 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
vcpu->thread_obj.switch_out = context_switch_out;
vcpu->thread_obj.switch_in = context_switch_in;
init_thread_data(&vcpu->thread_obj);
for (i = 0; i < VCPU_EVENT_NUM; i++) {
init_event(&vcpu->events[i]);
}
}
return ret;

View File

@ -553,12 +553,10 @@ int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id)
__func__, vcpu_id, target_vm->vm_id);
} else {
vcpu = vcpu_from_vid(target_vm, vcpu_id);
if (vcpu->state == VCPU_PAUSED) {
if (!vcpu->vm->sw.is_completion_polling) {
resume_vcpu(vcpu);
}
ret = 0;
if (!vcpu->vm->sw.is_completion_polling) {
signal_event(&vcpu->events[VCPU_EVENT_IOREQ]);
}
ret = 0;
}
}

View File

@ -103,14 +103,6 @@ int32_t acrn_insert_request(struct acrn_vcpu *vcpu, const struct io_request *io_
}
clac();
/* pause vcpu in notification mode , wait for VHM to handle the MMIO request.
* TODO: when pause_vcpu changed to switch vcpu out directlly, we
* should fix the race issue between req.processed update and vcpu pause
*/
if (!is_polling) {
pause_vcpu(vcpu, VCPU_PAUSED);
}
/* Before updating the vhm_req state, enforce all fill vhm_req operations done */
cpu_write_memory_barrier();
@ -136,10 +128,8 @@ int32_t acrn_insert_request(struct acrn_vcpu *vcpu, const struct io_request *io_
schedule();
}
}
} else if (need_reschedule(pcpuid_from_vcpu(vcpu))) {
schedule();
} else {
ret = -EINVAL;
wait_event(&vcpu->events[VCPU_EVENT_IOREQ]);
}
} else {
ret = -EINVAL;

View File

@ -22,6 +22,7 @@
#include <vlapic.h>
#include <vmtrr.h>
#include <schedule.h>
#include <event.h>
#include <io_req.h>
#include <msr.h>
#include <cpu.h>
@ -146,6 +147,9 @@ enum vm_cpu_mode {
CPU_MODE_64BIT, /* IA-32E mode (CS.L = 1) */
};
#define VCPU_EVENT_IOREQ 0
#define VCPU_EVENT_VIRTUAL_INTERRUPT 1
#define VCPU_EVENT_NUM 2
/* 2 worlds: 0 for Normal World, 1 for Secure World */
#define NR_WORLD 2
@ -260,6 +264,8 @@ struct acrn_vcpu {
uint64_t reg_cached;
uint64_t reg_updated;
struct sched_event events[VCPU_EVENT_NUM];
} __aligned(PAGE_SIZE);
struct vcpu_dump {