hv: remove redundant code for virtual interrupt injection

- vlapic_pending_intr() is only called when the APICv/VID
  (Virtual Interrupt Delivery) feature is not supported, so
  there is no need to test for the VID case inside it; see
  the sketch after this list.

- remove the 'apicv_pending_intr()' function.
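
A minimal sketch of the dispatch this change relies on (the wrapper name
inject_pending_vlapic_intr() is hypothetical and the flow is simplified;
the other identifiers are taken from the hypervisor sources):

    /*
     * Hypothetical caller sketch: when APICv/VID is supported, the
     * hardware delivers pending interrupts from the PIR on its own,
     * so the software query path below is never reached.  Only the
     * non-VID path queries the vLAPIC and injects through the VMCS.
     */
    static int32_t inject_pending_vlapic_intr(struct acrn_vcpu *vcpu)
    {
            if (is_apicv_intr_delivery_supported()) {
                    return 0;       /* VID: nothing to do in software */
            }
            /* non-VID: vcpu_inject_vlapic_int() -> vlapic_pending_intr() */
            return vcpu_inject_vlapic_int(vcpu);
    }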

Tracked-On: #2427
Signed-off-by: Yonghua Huang <yonghua.huang@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Yonghua Huang 2019-01-26 00:42:19 +08:00 committed by wenlingz
parent 6d5456a0df
commit c853eb4bc2
2 changed files with 36 additions and 86 deletions


@@ -80,9 +80,6 @@ static inline void vlapic_dump_isr(__unused const struct acrn_vlapic *vlapic, __
 static int32_t
 apicv_set_intr_ready(struct acrn_vlapic *vlapic, uint32_t vector);
 
-static int32_t
-apicv_pending_intr(const struct acrn_vlapic *vlapic);
-
 /*
  * Post an interrupt to the vcpu running on 'hostcpu'. This will use a
  * hardware assist if available (e.g. Posted Interrupt) or fall back to
@@ -1332,6 +1329,7 @@ vlapic_icrlo_write_handler(struct acrn_vlapic *vlapic)
  *
  * @remark The vector does not automatically transition to the ISR as a
  * result of calling this function.
+ * This function is only for case that APICv/VID is NOT supported.
  */
 int32_t
 vlapic_pending_intr(const struct acrn_vlapic *vlapic, uint32_t *vecptr)
@@ -1341,27 +1339,22 @@ vlapic_pending_intr(const struct acrn_vlapic *vlapic, uint32_t *vecptr)
     const struct lapic_reg *irrptr;
     int32_t ret = 0;
 
-    if (is_apicv_intr_delivery_supported()) {
-        ret = apicv_pending_intr(vlapic);
-    } else {
-        irrptr = &lapic->irr[0];
-
-        /* i ranges effectively from 7 to 0 */
-        for (i = 8U; i > 0U; ) {
-            i--;
-            val = atomic_load32(&irrptr[i].v);
-            bitpos = (uint32_t)fls32(val);
-            if (bitpos != INVALID_BIT_INDEX) {
-                vector = (i * 32U) + bitpos;
-                if (prio(vector) > prio(lapic->ppr.v)) {
-                    if (vecptr != NULL) {
-                        *vecptr = vector;
-                    }
-                    ret = 1;
-                }
-                break;
-            }
-        }
+    irrptr = &lapic->irr[0];
+    /* i ranges effectively from 7 to 0 */
+    for (i = 8U; i > 0U; ) {
+        i--;
+        val = atomic_load32(&irrptr[i].v);
+        bitpos = (uint32_t)fls32(val);
+        if (bitpos != INVALID_BIT_INDEX) {
+            vector = (i * 32U) + bitpos;
+            if (prio(vector) > prio(lapic->ppr.v)) {
+                if (vecptr != NULL) {
+                    *vecptr = vector;
+                }
+                ret = 1;
+            }
+            break;
+        }
     }
 
     return ret;
@@ -2229,46 +2222,6 @@ apicv_set_intr_ready(struct acrn_vlapic *vlapic, uint32_t vector)
     return notify;
 }
 
-static int32_t
-apicv_pending_intr(const struct acrn_vlapic *vlapic)
-{
-    const struct vlapic_pir_desc *pir_desc;
-    const struct lapic_regs *lapic;
-    uint64_t pending, pirval;
-    uint32_t i, ppr, vpr;
-    int32_t ret = 0;
-
-    pir_desc = &(vlapic->pir_desc);
-
-    pending = atomic_load64(&pir_desc->pending);
-    if (pending != 0U) {
-        lapic = &(vlapic->apic_page);
-        ppr = lapic->ppr.v & 0xF0U;
-
-        if (ppr == 0U) {
-            ret = 1;
-        } else {
-            /* i ranges effectively from 3 to 0 */
-            i = 4U;
-            while (i > 0U) {
-                i --;
-                if (pir_desc->pir[i] != 0U) {
-                    break;
-                }
-            }
-
-            pirval = pir_desc->pir[i];
-            if (pirval != 0U) {
-                vpr = (((i * 64U) + (uint32_t)fls64(pirval)) & 0xF0U);
-                ret = ((vpr > ppr) ? 1 : 0);
-            }
-        }
-    }
-
-    return ret;
-}
-
 /**
  *APIC-v: Get the HPA to APIC-access page
  * **/


@@ -70,6 +70,9 @@ static bool is_guest_irq_enabled(struct acrn_vcpu *vcpu)
     return status;
 }
 
+/*
+ * This function is only for case that APICv/VID is not supported.
+ */
 static bool vcpu_pending_request(struct acrn_vcpu *vcpu)
 {
     struct acrn_vlapic *vlapic;
@@ -108,39 +111,33 @@ void vcpu_make_request(struct acrn_vcpu *vcpu, uint16_t eventid)
     }
 }
 
+/*
+ * This function is only for case that APICv/VID is not supported.
+ */
 static int32_t vcpu_inject_vlapic_int(struct acrn_vcpu *vcpu)
 {
     struct acrn_vlapic *vlapic = vcpu_vlapic(vcpu);
     uint32_t vector = 0U;
     int32_t ret = 0;
 
-    /*
-     * This function used for inject virtual interrupt
-     * through vmcs.
-     */
-    if (is_apicv_intr_delivery_supported()) {
-        ret = -1;
-    } else {
-        /* Query vLapic to get vector to inject */
-        ret = vlapic_pending_intr(vlapic, &vector);
-        if (ret != 0) {
-            /*
-             * From the Intel SDM, Volume 3, 6.3.2 Section "Maskable
-             * Hardware Interrupts":
-             * - maskable interrupt vectors [16,255] can be delivered
-             * through the local APIC.
-             */
-            if (!((vector >= 16U) && (vector <= 255U))) {
-                dev_dbg(ACRN_DBG_INTR, "invalid vector %d from local APIC", vector);
-                ret = -1;
-            } else {
-                exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, VMX_INT_INFO_VALID |
-                    (vector & 0xFFU));
-                vlapic_intr_accepted(vlapic, vector);
-                ret = 0;
-            }
+    ret = vlapic_pending_intr(vlapic, &vector);
+    if (ret != 0) {
+        /*
+         * From the Intel SDM, Volume 3, 6.3.2 Section "Maskable
+         * Hardware Interrupts":
+         * - maskable interrupt vectors [16,255] can be delivered
+         * through the local APIC.
+         */
+        if (!((vector >= 16U) && (vector <= 255U))) {
+            dev_dbg(ACRN_DBG_INTR, "invalid vector %d from local APIC", vector);
+            ret = -1;
+        } else {
+            exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, VMX_INT_INFO_VALID |
+                (vector & 0xFFU));
+            vlapic_intr_accepted(vlapic, vector);
+            ret = 0;
         }
     }