io_emul: remove pending_pre_work

We no longer need pending_pre_work, as we can now make sure the IO-request
vCPU resumes from where it paused.

Now only three fixed points will try to schedule:
- vcpu_thread: before VM entry, checks the reschedule flag and reschedules if needed
- default_idle: loops checking the reschedule flag to see if it needs to switch out
- io request: if the IO request needs to be handled by the DM, it will schedule out

Tracked-On: #2394
Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Xu, Anthony <anthony.xu@intel.com>
This commit is contained in:
Jason Chen CJ 2019-01-21 14:08:39 +08:00 committed by wenlingz
parent 4fc54f952e
commit fb41ea5cfb
5 changed files with 3 additions and 43 deletions

View File

@ -383,7 +383,6 @@ int32_t create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn
vcpu->paused_cnt = 0U;
vcpu->running = 0;
vcpu->arch.nr_sipi = 0;
vcpu->pending_pre_work = 0U;
vcpu->state = VCPU_INIT;
reset_vcpu_regs(vcpu);
@ -535,7 +534,6 @@ void reset_vcpu(struct acrn_vcpu *vcpu)
vcpu->paused_cnt = 0U;
vcpu->running = 0;
vcpu->arch.nr_sipi = 0;
vcpu->pending_pre_work = 0U;
vcpu->arch.exception_info.exception = VECTOR_INVALID;
vcpu->arch.cur_context = NORMAL_WORLD;
@ -679,8 +677,3 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
return ret;
}
/*
 * Request deferred "pre work" for a vCPU.
 *
 * Sets bit @pre_work_id in vcpu->pending_pre_work (the "_lock" suffix
 * presumably means an atomic/locked bitmap operation — verify against the
 * bitmap helpers). The bit is later consumed by the vCPU's own thread
 * (run_vcpu_pre_work) before it resumes guest execution.
 *
 * @param vcpu        target vCPU whose pending_pre_work bitmap is updated
 * @param pre_work_id bit index of the pre-work kind (e.g. ACRN_VCPU_MMIO_COMPLETE)
 */
void request_vcpu_pre_work(struct acrn_vcpu *vcpu, uint16_t pre_work_id)
{
	bitmap_set_lock(pre_work_id, &vcpu->pending_pre_work);
}

View File

@ -146,18 +146,7 @@ void emulate_io_post(struct acrn_vcpu *vcpu)
} else {
switch (vcpu->req.type) {
case REQ_MMIO:
/*
* In IO completion polling mode, the post work of IO emulation will
* be running on its own pcpu, then we can do MMIO post work directly;
* While in notification mode, the post work of IO emulation will be
* running on SOS pcpu, then we need request_vcpu_pre_work and let
* its own pcpu get scheduled and finish the MMIO post work.
*/
if (!vcpu->vm->sw.is_completion_polling) {
request_vcpu_pre_work(vcpu, ACRN_VCPU_MMIO_COMPLETE);
} else {
dm_emulate_mmio_post(vcpu);
}
break;
case REQ_PORTIO:

View File

@ -8,23 +8,12 @@
#include <schedule.h>
#include <softirq.h>
/*
 * Execute any pending pre-work for this vCPU before it (re)enters the guest.
 *
 * The only pre-work kind visible here is ACRN_VCPU_MMIO_COMPLETE: if its
 * bit is set in vcpu->pending_pre_work, the bit is cleared (test-and-clear,
 * locked variant) and the deferred MMIO post-emulation is completed via
 * dm_emulate_mmio_post().
 */
static void run_vcpu_pre_work(struct acrn_vcpu *vcpu)
{
	if (bitmap_test_and_clear_lock(ACRN_VCPU_MMIO_COMPLETE,
			&vcpu->pending_pre_work)) {
		dm_emulate_mmio_post(vcpu);
	}
}
void vcpu_thread(struct sched_object *obj)
{
struct acrn_vcpu *vcpu = list_entry(obj, struct acrn_vcpu, sched_obj);
uint32_t basic_exit_reason = 0U;
int32_t ret = 0;
run_vcpu_pre_work(vcpu);
do {
/* If vcpu is not launched, we need to do init_vmcs first */
if (!vcpu->launched) {
@ -46,15 +35,7 @@ void vcpu_thread(struct sched_object *obj)
}
if (need_reschedule(vcpu->pcpu_id)) {
/*
* In extrem case, schedule() could return. Which
* means the vcpu resume happens before schedule()
* triggered by vcpu suspend. In this case, we need
* to do pre work and continue vcpu loop after
* schedule() is return.
*/
schedule();
run_vcpu_pre_work(vcpu);
continue;
}

View File

@ -154,6 +154,8 @@ int32_t acrn_insert_request_wait(struct acrn_vcpu *vcpu, const struct io_request
}
asm_pause();
}
} else if (need_reschedule(vcpu->pcpu_id)) {
schedule();
}
ret = 0;
} else {

View File

@ -13,8 +13,6 @@
#ifndef VCPU_H
#define VCPU_H
#define ACRN_VCPU_MMIO_COMPLETE (0U)
/* Number of GPRs saved / restored for guest in VCPU structure */
#define NUM_GPRS 16U
#define GUEST_STATE_AREA_SIZE 512
@ -274,7 +272,6 @@ struct acrn_vcpu {
uint64_t sync; /*hold the bit events*/
struct sched_object sched_obj;
uint64_t pending_pre_work; /* any pre work pending? */
bool launched; /* Whether the vcpu is launched on target pcpu */
uint32_t paused_cnt; /* how many times vcpu is paused */
uint32_t running; /* vcpu is picked up and run? */
@ -606,8 +603,6 @@ void schedule_vcpu(struct acrn_vcpu *vcpu);
*/
int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id);
void request_vcpu_pre_work(struct acrn_vcpu *vcpu, uint16_t pre_work_id);
/**
* @}
*/