hv: remove unnecessary cancel_event_injection related stuff

cancel_event_injection is not needed any more if we do 'schedule' prior to
acrn_handle_pending_request. Commit "921288a6672: hv: fix interrupt
lost when do acrn_handle_pending_request twice" brought 'schedule'
forward, so remove cancel_event_injection related stuff.

Tracked-On: #3374
Signed-off-by: Shuo A Liu <shuo.a.liu@intel.com>
Reviewed-by: Jason Chen CJ <jason.cj.chen@intel.com>
This commit is contained in:
Shuo A Liu 2019-07-01 14:40:54 +08:00 committed by wenlingz
parent ea849177a5
commit 4129b72b2e
4 changed files with 20 additions and 74 deletions

View File

@ -579,7 +579,6 @@ void reset_vcpu(struct acrn_vcpu *vcpu)
vcpu->arch.exception_info.exception = VECTOR_INVALID; vcpu->arch.exception_info.exception = VECTOR_INVALID;
vcpu->arch.cur_context = NORMAL_WORLD; vcpu->arch.cur_context = NORMAL_WORLD;
vcpu->arch.irq_window_enabled = false; vcpu->arch.irq_window_enabled = false;
vcpu->arch.inject_event_pending = false;
(void)memset((void *)vcpu->arch.vmcs, 0U, PAGE_SIZE); (void)memset((void *)vcpu->arch.vmcs, 0U, PAGE_SIZE);
for (i = 0; i < NR_WORLD; i++) { for (i = 0; i < NR_WORLD; i++) {
@ -646,9 +645,6 @@ static void context_switch_out(struct sched_object *prev)
{ {
struct acrn_vcpu *vcpu = list_entry(prev, struct acrn_vcpu, sched_obj); struct acrn_vcpu *vcpu = list_entry(prev, struct acrn_vcpu, sched_obj);
/* cancel event(int, gp, nmi and exception) injection */
cancel_event_injection(vcpu);
atomic_store32(&vcpu->running, 0U); atomic_store32(&vcpu->running, 0U);
/* do prev vcpu context switch out */ /* do prev vcpu context switch out */
/* For now, we don't need to invalid ept. /* For now, we don't need to invalid ept.

View File

@ -365,8 +365,6 @@ int32_t acrn_handle_pending_request(struct acrn_vcpu *vcpu)
bool injected = false; bool injected = false;
int32_t ret = 0; int32_t ret = 0;
uint32_t tmp; uint32_t tmp;
uint32_t intr_info;
uint32_t error_code;
struct acrn_vcpu_arch *arch = &vcpu->arch; struct acrn_vcpu_arch *arch = &vcpu->arch;
uint64_t *pending_req_bits = &arch->pending_req; uint64_t *pending_req_bits = &arch->pending_req;
@ -387,19 +385,6 @@ int32_t acrn_handle_pending_request(struct acrn_vcpu *vcpu)
vcpu_set_vmcs_eoi_exit(vcpu); vcpu_set_vmcs_eoi_exit(vcpu);
} }
/* handling cancelled event injection when vcpu is switched out */
if (arch->inject_event_pending) {
if ((arch->inject_info.intr_info & (EXCEPTION_ERROR_CODE_VALID << 8U)) != 0U) {
error_code = arch->inject_info.error_code;
exec_vmwrite32(VMX_ENTRY_EXCEPTION_ERROR_CODE, error_code);
}
intr_info = arch->inject_info.intr_info;
exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, intr_info);
arch->inject_event_pending = false;
injected = true;
} else {
/* SDM Vol 3 - table 6-2, inject high priority exception before /* SDM Vol 3 - table 6-2, inject high priority exception before
* maskable hardware interrupt */ * maskable hardware interrupt */
injected = vcpu_inject_hi_exception(vcpu); injected = vcpu_inject_hi_exception(vcpu);
@ -425,7 +410,6 @@ int32_t acrn_handle_pending_request(struct acrn_vcpu *vcpu)
} }
} }
} }
}
if (!acrn_inject_pending_intr(vcpu, pending_req_bits, injected)) { if (!acrn_inject_pending_intr(vcpu, pending_req_bits, injected)) {
/* if there is no eligible vector before this point */ /* if there is no eligible vector before this point */
@ -485,31 +469,6 @@ static inline bool acrn_inject_pending_intr(struct acrn_vcpu *vcpu,
return ret; return ret;
} }
/*
 * Cancel a VMX event injection that has been programmed but not yet
 * delivered, stashing it in the vcpu so it can be re-injected later.
 *
 * Reads VMX_ENTRY_INT_INFO_FIELD; if the valid bit is set, the interrupt
 * info (and the error code, when the error-code-valid bit is set) is saved
 * into vcpu->arch.inject_info, inject_event_pending is raised, and the
 * VMCS entry-interruption field is cleared so no injection happens on the
 * next VM entry. If no valid event is pending, this is a no-op.
 *
 * NOTE(review): presumably called when the vcpu is scheduled out, so the
 * event is not lost across a context switch — confirm against callers.
 */
void cancel_event_injection(struct acrn_vcpu *vcpu)
{
uint32_t intinfo;
intinfo = exec_vmread32(VMX_ENTRY_INT_INFO_FIELD);
/*
* If event is injected, we clear VMX_ENTRY_INT_INFO_FIELD,
* save injection info, and mark inject event pending.
* The event will be re-injected in next acrn_handle_pending_request
* call.
*/
if ((intinfo & VMX_INT_INFO_VALID) != 0U) {
vcpu->arch.inject_event_pending = true;
/* Error code is only architecturally valid when bit 11 of the
 * interruption-information field (EXCEPTION_ERROR_CODE_VALID << 8U)
 * is set, so read it conditionally. */
if ((intinfo & (EXCEPTION_ERROR_CODE_VALID << 8U)) != 0U) {
vcpu->arch.inject_info.error_code =
exec_vmread32(VMX_ENTRY_EXCEPTION_ERROR_CODE);
}
vcpu->arch.inject_info.intr_info = intinfo;
/* Clearing the field (valid bit = 0) disarms the pending injection. */
exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, 0U);
}
}
/* /*
* @pre vcpu != NULL * @pre vcpu != NULL
*/ */

View File

@ -264,11 +264,6 @@ struct ext_context {
#define EOI_EXIT_BITMAP_SIZE 256U #define EOI_EXIT_BITMAP_SIZE 256U
struct event_injection_info {
uint32_t intr_info;
uint32_t error_code;
};
struct cpu_context { struct cpu_context {
struct run_context run_ctx; struct run_context run_ctx;
struct ext_context ext_ctx; struct ext_context ext_ctx;
@ -339,8 +334,6 @@ struct acrn_vcpu_arch {
/* interrupt injection information */ /* interrupt injection information */
uint64_t pending_req; uint64_t pending_req;
bool inject_event_pending;
struct event_injection_info inject_info;
/* List of MSRS to be stored and loaded on VM exits or VM entries */ /* List of MSRS to be stored and loaded on VM exits or VM entries */
struct msr_store_area msr_area; struct msr_store_area msr_area;

View File

@ -210,8 +210,6 @@ int32_t interrupt_window_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t external_interrupt_vmexit_handler(struct acrn_vcpu *vcpu); int32_t external_interrupt_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t acrn_handle_pending_request(struct acrn_vcpu *vcpu); int32_t acrn_handle_pending_request(struct acrn_vcpu *vcpu);
void cancel_event_injection(struct acrn_vcpu *vcpu);
extern uint64_t irq_alloc_bitmap[IRQ_ALLOC_BITMAP_SIZE]; extern uint64_t irq_alloc_bitmap[IRQ_ALLOC_BITMAP_SIZE];
typedef void (*irq_action_t)(uint32_t irq, void *priv_data); typedef void (*irq_action_t)(uint32_t irq, void *priv_data);