hv: vlapic: wrap APICv inject interrupt API

apicv_advanced_inject_intr is used when all APICv advanced features are supported;
it uses the PIR to inject interrupts. Otherwise, apicv_basic_inject_intr is used;
it injects the interrupt through the VMCS entry interrupt information field.

Tracked-On: #1842
Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Li, Fei1 2019-03-21 00:22:24 +08:00 committed by wenlingz
parent 1db8123c2d
commit 037fffc203
5 changed files with 51 additions and 76 deletions
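For context, a minimal sketch of how the new wrapper is expected to be wired up, based on the ops tables added in this diff. The body of vlapic_set_apicv_ops() is not part of this patch, so the selection logic shown here is an assumption inferred from the branch removed from acrn_inject_pending_intr():

/* Sketch only -- not part of this patch.  apicv_basic_ops,
 * apicv_advanced_ops and the apicv_ops pointer are taken from the diff
 * below; the selection logic is an assumption. */
static const struct acrn_apicv_ops *apicv_ops = &apicv_basic_ops;

void vlapic_set_apicv_ops(void)
{
	if (is_apicv_advanced_feature_supported()) {
		apicv_ops = &apicv_advanced_ops;  /* inject via PIR sync */
	} else {
		apicv_ops = &apicv_basic_ops;     /* inject via VMCS entry-info write */
	}
}

/* Wrapper added by this patch: callers no longer need to know which
 * injection mechanism the platform uses. */
bool vlapic_inject_intr(struct acrn_vlapic *vlapic, bool guest_irq_enabled, bool injected)
{
	return apicv_ops->inject_intr(vlapic, guest_irq_enabled, injected);
}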

View File

@@ -94,9 +94,6 @@ an interrupt, for example:
These APIs will finish by making a request for *ACRN_REQUEST_EVENT.*
.. doxygenfunction:: vlapic_get_deliverable_intr
:project: Project ACRN
.. doxygenfunction:: vlapic_find_deliverable_intr
:project: Project ACRN

View File

@@ -146,35 +146,6 @@ void vcpu_make_request(struct acrn_vcpu *vcpu, uint16_t eventid)
}
}
/*
* This function is only for case that APICv/VID is not supported.
*
* @retval true when INT is injected to guest.
* @retval false when there is no eligible pending vector.
*/
static bool vcpu_inject_vlapic_int(struct acrn_vcpu *vcpu)
{
struct acrn_vlapic *vlapic = vcpu_vlapic(vcpu);
uint32_t vector = 0U;
bool injected = false;
if (vlapic_find_deliverable_intr(vlapic, &vector)) {
/*
* From the Intel SDM, Volume 3, 6.3.2 Section "Maskable
* Hardware Interrupts":
* - maskable interrupt vectors [16,255] can be delivered
* through the local APIC.
*/
exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, VMX_INT_INFO_VALID | vector);
vlapic_get_deliverable_intr(vlapic, vector);
injected = true;
}
vlapic_update_tpr_threshold(vlapic);
return injected;
}
/*
* @retval true when INT is injected to guest.
* @retval false when otherwise
@@ -529,7 +500,6 @@ static inline bool acrn_inject_pending_intr(struct acrn_vcpu *vcpu,
uint64_t *pending_req_bits, bool injected)
{
bool ret = injected;
struct acrn_vlapic *vlapic;
bool guest_irq_enabled = is_guest_irq_enabled(vcpu);
if (guest_irq_enabled && (!ret)) {
@@ -541,27 +511,7 @@ static inline bool acrn_inject_pending_intr(struct acrn_vcpu *vcpu,
}
if (bitmap_test_and_clear_lock(ACRN_REQUEST_EVENT, pending_req_bits)) {
/*
* From SDM Vol3 26.3.2.5:
* Once the virtual interrupt is recognized, it will be delivered
* in VMX non-root operation immediately after VM entry(including
* any specified event injection) completes.
*
* So the hardware can handle vmcs event injection and
* evaluation/delivery of apicv virtual interrupts in one time
* vm-entry.
*
* Here to sync the pending interrupts to irr and update rvi if
* needed. And then try to handle vmcs event injection.
*/
if (is_apicv_advanced_feature_supported()) {
vlapic = vcpu_vlapic(vcpu);
vlapic_apicv_inject_pir(vlapic);
} else {
if (guest_irq_enabled && (!ret)) {
ret = vcpu_inject_vlapic_int(vcpu);
}
}
ret = vlapic_inject_intr(vcpu_vlapic(vcpu), guest_irq_enabled, ret);
}
return ret;
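
For readability, the resulting acrn_inject_pending_intr() after this patch reads roughly as follows. This is a reconstruction from the context lines above; the block elided by the diff is summarized as a comment:

static inline bool acrn_inject_pending_intr(struct acrn_vcpu *vcpu,
		uint64_t *pending_req_bits, bool injected)
{
	bool ret = injected;
	bool guest_irq_enabled = is_guest_irq_enabled(vcpu);

	if (guest_irq_enabled && (!ret)) {
		/* ... unchanged handling elided by the diff context ... */
	}

	if (bitmap_test_and_clear_lock(ACRN_REQUEST_EVENT, pending_req_bits)) {
		/* The vlapic now decides how to inject: PIR sync on APICv
		 * advanced platforms, VMCS entry-info injection otherwise. */
		ret = vlapic_inject_intr(vcpu_vlapic(vcpu), guest_irq_enabled, ret);
	}

	return ret;
}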

View File

@@ -1383,8 +1383,7 @@ bool vlapic_find_deliverable_intr(const struct acrn_vlapic *vlapic, uint32_t *ve
*
* @pre vlapic != NULL
*/
void
vlapic_get_deliverable_intr(struct acrn_vlapic *vlapic, uint32_t vector)
static void vlapic_get_deliverable_intr(struct acrn_vlapic *vlapic, uint32_t vector)
{
struct lapic_regs *lapic = &(vlapic->apic_page);
struct lapic_reg *irrptr, *isrptr;
@@ -2202,13 +2201,29 @@ vlapic_apicv_get_apic_page_addr(struct acrn_vlapic *vlapic)
return hva2hpa(&(vlapic->apic_page));
}
static bool apicv_basic_inject_intr(struct acrn_vlapic *vlapic,
bool guest_irq_enabled, bool injected)
{
uint32_t vector = 0U;
bool ret = injected;
if (guest_irq_enabled && (!injected)) {
if (vlapic_find_deliverable_intr(vlapic, &vector)) {
exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, VMX_INT_INFO_VALID | vector);
vlapic_get_deliverable_intr(vlapic, vector);
ret = true;
}
}
vlapic_update_tpr_threshold(vlapic);
return ret;
}
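
As an aside, the VMX_ENTRY_INT_INFO_FIELD write above encodes the deliverable vector as an external interrupt. A minimal sketch of the field layout for reference; the helper below is purely illustrative and not ACRN code:

/* VM-entry interruption-information field layout (Intel SDM, Vol. 3,
 * VM-entry event injection):
 *   bits  7:0  vector
 *   bits 10:8  interruption type (0 = external interrupt)
 *   bit  31    valid
 * apicv_basic_inject_intr() writes VMX_INT_INFO_VALID | vector, i.e. an
 * external interrupt with the given vector. */
static inline uint32_t entry_int_info(uint32_t vector)
{
	return VMX_INT_INFO_VALID | (vector & 0xFFU);
}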
/*
* Transfer the pending interrupts in the PIR descriptor to the IRR
* in the virtual APIC page.
*/
void
vlapic_apicv_inject_pir(struct acrn_vlapic *vlapic)
static void vlapic_apicv_inject_pir(struct acrn_vlapic *vlapic)
{
struct vlapic_pir_desc *pir_desc;
struct lapic_regs *lapic;
@@ -2270,6 +2285,32 @@ vlapic_apicv_inject_pir(struct acrn_vlapic *vlapic)
}
}
static bool apicv_advanced_inject_intr(struct acrn_vlapic *vlapic,
__unused bool guest_irq_enabled, bool injected)
{
/*
* From SDM Vol3 26.3.2.5:
* Once the virtual interrupt is recognized, it will be delivered
* in VMX non-root operation immediately after VM entry(including
* any specified event injection) completes.
*
* So the hardware can handle vmcs event injection and
* evaluation/delivery of apicv virtual interrupts in one time
* vm-entry.
*
* Here to sync the pending interrupts to irr and update rvi if
* needed. And then try to handle vmcs event injection.
*/
vlapic_apicv_inject_pir(vlapic);
return injected;
}
bool vlapic_inject_intr(struct acrn_vlapic *vlapic, bool guest_irq_enabled, bool injected)
{
return apicv_ops->inject_intr(vlapic, guest_irq_enabled, injected);
}
int32_t apic_access_vmexit_handler(struct acrn_vcpu *vcpu)
{
int32_t err = 0;
@@ -2461,10 +2502,12 @@ int32_t tpr_below_threshold_vmexit_handler(struct acrn_vcpu *vcpu)
static const struct acrn_apicv_ops apicv_basic_ops = {
.accept_intr = apicv_basic_accept_intr,
.inject_intr = apicv_basic_inject_intr,
};
static const struct acrn_apicv_ops apicv_advanced_ops = {
.accept_intr = apicv_advanced_accept_intr,
.inject_intr = apicv_advanced_inject_intr,
};
/*

View File

@@ -84,6 +84,7 @@
struct acrn_apicv_ops {
void (*accept_intr)(struct acrn_vlapic *vlapic, uint32_t vector, bool level);
bool (*inject_intr)(struct acrn_vlapic *vlapic, bool guest_irq_enabled, bool injected);
};
#endif /* VLAPIC_PRIV_H */

View File

@@ -135,22 +135,7 @@ void vlapic_set_apicv_ops(void);
*/
bool vlapic_find_deliverable_intr(const struct acrn_vlapic *vlapic, uint32_t *vecptr);
/**
* @brief Get a deliverable virtual interrupt from irr to isr.
*
* Transition 'vector' from IRR to ISR. This function is called with the
* vector returned by 'vlapic_find_deliverable_intr()' when the guest is able to
* accept this interrupt (i.e. RFLAGS.IF = 1 and no conditions exist that
* block interrupt delivery).
*
* @param[in] vlapic Pointer to target vLAPIC data structure
* @param[in] vector Target virtual interrupt vector
*
* @return None
*
* @pre vlapic != NULL
*/
void vlapic_get_deliverable_intr(struct acrn_vlapic *vlapic, uint32_t vector);
bool vlapic_inject_intr(struct acrn_vlapic *vlapic, bool guest_irq_enabled, bool injected);
/**
* @brief Get physical address to PIR description.
@@ -231,7 +216,6 @@ void vlapic_reset(struct acrn_vlapic *vlapic);
void vlapic_restore(struct acrn_vlapic *vlapic, const struct lapic_regs *regs);
uint64_t vlapic_apicv_get_apic_access_addr(void);
uint64_t vlapic_apicv_get_apic_page_addr(struct acrn_vlapic *vlapic);
void vlapic_apicv_inject_pir(struct acrn_vlapic *vlapic);
int32_t apic_access_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t apic_write_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t veoi_vmexit_handler(struct acrn_vcpu *vcpu);