From fb41ea5cfbd4511bfae66a1d3adbe1333c7f7683 Mon Sep 17 00:00:00 2001 From: Jason Chen CJ Date: Mon, 21 Jan 2019 14:08:39 +0800 Subject: [PATCH] io_emul: remove pending_pre_work Now we do not need pending_pre_work anymore, as we can make sure IO request VCPU resumes from where it paused. Now only three fixed points will try to do schedule: - vcpu_thread: before vm entry, will check reschedule flag and do it if needed - default_idle: loop check reschedule flag to see if need switch out - io request: if IO REQ need DM's handle, it will schedule out Tracked-On: #2394 Signed-off-by: Jason Chen CJ Acked-by: Xu, Anthony --- hypervisor/arch/x86/guest/vcpu.c | 7 ------- hypervisor/arch/x86/io_emul.c | 13 +------------ hypervisor/common/hv_main.c | 19 ------------------- hypervisor/common/io_req.c | 2 ++ hypervisor/include/arch/x86/guest/vcpu.h | 5 ----- 5 files changed, 3 insertions(+), 43 deletions(-) diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c index 560fe7665..d028587a0 100644 --- a/hypervisor/arch/x86/guest/vcpu.c +++ b/hypervisor/arch/x86/guest/vcpu.c @@ -383,7 +383,6 @@ int32_t create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn vcpu->paused_cnt = 0U; vcpu->running = 0; vcpu->arch.nr_sipi = 0; - vcpu->pending_pre_work = 0U; vcpu->state = VCPU_INIT; reset_vcpu_regs(vcpu); @@ -535,7 +534,6 @@ void reset_vcpu(struct acrn_vcpu *vcpu) vcpu->paused_cnt = 0U; vcpu->running = 0; vcpu->arch.nr_sipi = 0; - vcpu->pending_pre_work = 0U; vcpu->arch.exception_info.exception = VECTOR_INVALID; vcpu->arch.cur_context = NORMAL_WORLD; @@ -679,8 +677,3 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id) return ret; } - -void request_vcpu_pre_work(struct acrn_vcpu *vcpu, uint16_t pre_work_id) -{ - bitmap_set_lock(pre_work_id, &vcpu->pending_pre_work); -} diff --git a/hypervisor/arch/x86/io_emul.c b/hypervisor/arch/x86/io_emul.c index 657f38c7e..3956af2de 100644 --- a/hypervisor/arch/x86/io_emul.c +++ 
b/hypervisor/arch/x86/io_emul.c @@ -146,18 +146,7 @@ void emulate_io_post(struct acrn_vcpu *vcpu) } else { switch (vcpu->req.type) { case REQ_MMIO: - /* - * In IO completion polling mode, the post work of IO emulation will - * be running on its own pcpu, then we can do MMIO post work directly; - * While in notification mode, the post work of IO emulation will be - * running on SOS pcpu, then we need request_vcpu_pre_work and let - * its own pcpu get scheduled and finish the MMIO post work. - */ - if (!vcpu->vm->sw.is_completion_polling) { - request_vcpu_pre_work(vcpu, ACRN_VCPU_MMIO_COMPLETE); - } else { - dm_emulate_mmio_post(vcpu); - } + dm_emulate_mmio_post(vcpu); break; case REQ_PORTIO: diff --git a/hypervisor/common/hv_main.c b/hypervisor/common/hv_main.c index 34ec081b4..30dc0b630 100644 --- a/hypervisor/common/hv_main.c +++ b/hypervisor/common/hv_main.c @@ -8,23 +8,12 @@ #include #include -static void run_vcpu_pre_work(struct acrn_vcpu *vcpu) -{ - uint64_t *pending_pre_work = &vcpu->pending_pre_work; - - if (bitmap_test_and_clear_lock(ACRN_VCPU_MMIO_COMPLETE, pending_pre_work)) { - dm_emulate_mmio_post(vcpu); - } -} - void vcpu_thread(struct sched_object *obj) { struct acrn_vcpu *vcpu = list_entry(obj, struct acrn_vcpu, sched_obj); uint32_t basic_exit_reason = 0U; int32_t ret = 0; - run_vcpu_pre_work(vcpu); - do { /* If vcpu is not launched, we need to do init_vmcs first */ if (!vcpu->launched) { @@ -46,15 +35,7 @@ void vcpu_thread(struct sched_object *obj) } if (need_reschedule(vcpu->pcpu_id)) { - /* - * In extrem case, schedule() could return. Which - * means the vcpu resume happens before schedule() - * triggered by vcpu suspend. In this case, we need - * to do pre work and continue vcpu loop after - * schedule() is return. 
- */ schedule(); - run_vcpu_pre_work(vcpu); continue; } diff --git a/hypervisor/common/io_req.c b/hypervisor/common/io_req.c index 9776fe1e5..48aac6d3f 100644 --- a/hypervisor/common/io_req.c +++ b/hypervisor/common/io_req.c @@ -154,6 +154,8 @@ int32_t acrn_insert_request_wait(struct acrn_vcpu *vcpu, const struct io_request } asm_pause(); } + } else if (need_reschedule(vcpu->pcpu_id)) { + schedule(); } ret = 0; } else { diff --git a/hypervisor/include/arch/x86/guest/vcpu.h b/hypervisor/include/arch/x86/guest/vcpu.h index ec28fa703..54972f507 100644 --- a/hypervisor/include/arch/x86/guest/vcpu.h +++ b/hypervisor/include/arch/x86/guest/vcpu.h @@ -13,8 +13,6 @@ #ifndef VCPU_H #define VCPU_H -#define ACRN_VCPU_MMIO_COMPLETE (0U) - /* Number of GPRs saved / restored for guest in VCPU structure */ #define NUM_GPRS 16U #define GUEST_STATE_AREA_SIZE 512 @@ -274,7 +272,6 @@ struct acrn_vcpu { uint64_t sync; /*hold the bit events*/ struct sched_object sched_obj; - uint64_t pending_pre_work; /* any pre work pending? */ bool launched; /* Whether the vcpu is launched on target pcpu */ uint32_t paused_cnt; /* how many times vcpu is paused */ uint32_t running; /* vcpu is picked up and run? */ @@ -606,8 +603,6 @@ void schedule_vcpu(struct acrn_vcpu *vcpu); */ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id); -void request_vcpu_pre_work(struct acrn_vcpu *vcpu, uint16_t pre_work_id); - /** * @} */