diff --git a/hypervisor/arch/x86/guest/lock_instr_emul.c b/hypervisor/arch/x86/guest/lock_instr_emul.c
index 8729e3369..10947a424 100644
--- a/hypervisor/arch/x86/guest/lock_instr_emul.c
+++ b/hypervisor/arch/x86/guest/lock_instr_emul.c
@@ -60,25 +60,7 @@ void vcpu_complete_lock_instr_emulation(struct acrn_vcpu *cur_vcpu)
 	if (cur_vcpu->vm->hw.created_vcpus > 1U) {
 		foreach_vcpu(i, cur_vcpu->vm, other) {
 			if ((other != cur_vcpu) && (other->state == VCPU_RUNNING)) {
-				/*
-				 * When we vcpu_make_request above, there is a time window between kick_vcpu and
-				 * the target vcpu actually does acrn_handle_pending_request (and eventually wait_event).
-				 * It is possible that the issuing vcpu has already completed lock emulation and
-				 * calls signal_event, while the target vcpu has not yet reaching acrn_handle_pending_request.
-				 *
-				 * This causes problem: Say we have vcpuA make request to vcpuB.
-				 * During the above said time window, if A does kick_lock/complete_lock pair multiple times,
-				 * or some other vcpu C makes request to B after A releases the vm lock below, then the bit
-				 * in B's pending_req will be cleared only once, and B will call wait_event only once,
-				 * while other vcpu may call signal_event many times to B. I.e., one bit is not enough
-				 * to cache multiple requests.
-				 *
-				 * To work this around, we try to cancel the request (clear the bit in pending_req) if it
-				 * is unhandled, and signal_event only when the request was already handled.
-				 */
-				if (!vcpu_try_cancel_request(other, ACRN_REQUEST_SPLIT_LOCK)) {
-					signal_event(&other->events[VCPU_EVENT_SPLIT_LOCK]);
-				}
+				signal_event(&other->events[VCPU_EVENT_SPLIT_LOCK]);
 			}
 		}
 
diff --git a/hypervisor/arch/x86/guest/virq.c b/hypervisor/arch/x86/guest/virq.c
index 02c3c5dd9..014094014 100644
--- a/hypervisor/arch/x86/guest/virq.c
+++ b/hypervisor/arch/x86/guest/virq.c
@@ -133,12 +133,6 @@ void vcpu_make_request(struct acrn_vcpu *vcpu, uint16_t eventid)
 	kick_vcpu(vcpu);
 }
 
-/* Return true if an unhandled request is cancelled, false otherwise. */
-bool vcpu_try_cancel_request(struct acrn_vcpu *vcpu, uint16_t eventid)
-{
-	return bitmap_test_and_clear_lock(eventid, &vcpu->arch.pending_req);
-}
-
 /*
  * @retval true when INT is injected to guest.
  * @retval false when otherwise
diff --git a/hypervisor/common/event.c b/hypervisor/common/event.c
index cf3ce7099..eb1fc88a0 100644
--- a/hypervisor/common/event.c
+++ b/hypervisor/common/event.c
@@ -11,7 +11,7 @@
 void init_event(struct sched_event *event)
 {
 	spinlock_init(&event->lock);
-	event->nqueued = 0;
+	event->set = false;
 	event->waiting_thread = NULL;
 }
 
@@ -20,7 +20,7 @@ void reset_event(struct sched_event *event)
 	uint64_t rflag;
 
 	spinlock_irqsave_obtain(&event->lock, &rflag);
-	event->nqueued = 0;
+	event->set = false;
 	event->waiting_thread = NULL;
 	spinlock_irqrestore_release(&event->lock, rflag);
 }
@@ -33,13 +33,13 @@ void wait_event(struct sched_event *event)
 	spinlock_irqsave_obtain(&event->lock, &rflag);
 	ASSERT((event->waiting_thread == NULL), "only support exclusive waiting");
 	event->waiting_thread = sched_get_current(get_pcpu_id());
-	event->nqueued++;
-	while ((event->nqueued > 0) && (event->waiting_thread != NULL)) {
+	while (!event->set && (event->waiting_thread != NULL)) {
 		sleep_thread(event->waiting_thread);
 		spinlock_irqrestore_release(&event->lock, rflag);
 		schedule();
 		spinlock_irqsave_obtain(&event->lock, &rflag);
 	}
+	event->set = false;
 	event->waiting_thread = NULL;
 	spinlock_irqrestore_release(&event->lock, rflag);
 }
@@ -49,7 +49,7 @@ void signal_event(struct sched_event *event)
 	uint64_t rflag;
 
 	spinlock_irqsave_obtain(&event->lock, &rflag);
-	event->nqueued--;
+	event->set = true;
 	if (event->waiting_thread != NULL) {
 		wake_thread(event->waiting_thread);
 	}
diff --git a/hypervisor/include/arch/x86/asm/guest/virq.h b/hypervisor/include/arch/x86/asm/guest/virq.h
index 4e962e455..00cace991 100644
--- a/hypervisor/include/arch/x86/asm/guest/virq.h
+++ b/hypervisor/include/arch/x86/asm/guest/virq.h
@@ -103,7 +103,6 @@ void vcpu_inject_ud(struct acrn_vcpu *vcpu);
  */
void vcpu_inject_ss(struct acrn_vcpu *vcpu);
 void vcpu_make_request(struct acrn_vcpu *vcpu, uint16_t eventid);
-bool vcpu_try_cancel_request(struct acrn_vcpu *vcpu, uint16_t eventid);
 
 /*
  * @pre vcpu != NULL
diff --git a/hypervisor/include/common/event.h b/hypervisor/include/common/event.h
index 2aba27b32..78430b21e 100644
--- a/hypervisor/include/common/event.h
+++ b/hypervisor/include/common/event.h
@@ -4,7 +4,7 @@
 
 struct sched_event {
 	spinlock_t lock;
-	int8_t nqueued;
+	bool set;
 	struct thread_object* waiting_thread;
 };
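
Note for reviewers: this change restores latch semantics for struct sched_event. signal_event() sets the boolean under the event lock, so a signal that arrives before the waiter reaches wait_event() is not lost, and any number of signals delivered between two waits collapse into a single wakeup, which wait_event() consumes by clearing the flag. The following is a minimal, hypothetical user-space sketch of those semantics, using POSIX threads in place of the hypervisor's spinlock and scheduler primitives; the demo_* names are illustrative only and are not ACRN APIs.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct sched_event: a mutex/condvar pair plus
 * the latched 'set' flag this patch restores. */
struct demo_event {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool set;
};

static void demo_event_init(struct demo_event *ev)
{
	pthread_mutex_init(&ev->lock, NULL);
	pthread_cond_init(&ev->cond, NULL);
	ev->set = false;
}

/* Mirrors wait_event(): block until 'set' is latched, then consume it. */
static void demo_wait_event(struct demo_event *ev)
{
	pthread_mutex_lock(&ev->lock);
	while (!ev->set) {
		pthread_cond_wait(&ev->cond, &ev->lock);
	}
	ev->set = false;
	pthread_mutex_unlock(&ev->lock);
}

/* Mirrors signal_event(): latch 'set' even if nobody is waiting yet. */
static void demo_signal_event(struct demo_event *ev)
{
	pthread_mutex_lock(&ev->lock);
	ev->set = true;
	pthread_cond_signal(&ev->cond);
	pthread_mutex_unlock(&ev->lock);
}

static void *waiter(void *arg)
{
	demo_wait_event(arg);
	printf("waiter woke exactly once\n");
	return NULL;
}

int main(void)
{
	struct demo_event ev;
	pthread_t tid;

	demo_event_init(&ev);
	/* Two signals before the waiter runs collapse into one latched bit;
	 * the subsequent wait returns immediately, exactly once. */
	demo_signal_event(&ev);
	demo_signal_event(&ev);
	pthread_create(&tid, NULL, waiter, &ev);
	pthread_join(tid, NULL);
	return 0;
}

Built with "cc demo.c -lpthread", the program prints its message once even though demo_signal_event() ran twice before the waiter existed, illustrating the signal-before-wait and signal-coalescing behavior of the boolean event.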