hv: vlapic: make the vlapic interrupt delivery functions more readable

Rename vlapic_deliver_intr to vlapic_receive_intr: an ioapic/MSI device
delivers an interrupt to the lapic.
Rename vlapic_pending_intr to vlapic_find_deliverable_intr: find a
deliverable interrupt that is pending in IRR and whose priority is higher
than the PPR.
Rename vlapic_intr_accepted to vlapic_get_deliverable_intr: get the deliverable
interrupt from IRR and set it in ISR (which also raises a PPR update).
(A short illustrative sketch of the resulting call sequence follows the commit
metadata below.)

Tracked-On: #1842
Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Authored by Li, Fei1 on 2019-02-02 00:30:20 +08:00; committed by acrnsi
parent 123c2b5fa4
commit ce19dd423e
7 changed files with 39 additions and 44 deletions
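
To make the new division of labor concrete, here is a minimal sketch of how the renamed pair is meant to be called on the injection path. It is condensed from the vcpu_inject_vlapic_int() change below; the wrapper name inject_one_vlapic_intr is hypothetical, and error handling plus the surrounding VMX bookkeeping are omitted.

static int32_t inject_one_vlapic_intr(struct acrn_vlapic *vlapic)
{
	uint32_t vector = 0U;
	int32_t injected = 0;

	/* 1. Find an IRR vector whose priority beats the current PPR;
	 *    nothing is moved to the ISR yet.
	 */
	if (vlapic_find_deliverable_intr(vlapic, &vector)) {
		/* 2. The guest can accept it (RFLAGS.IF = 1, no blocking
		 *    condition), so program the VM-entry interruption field ...
		 */
		exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD,
				VMX_INT_INFO_VALID | (vector & 0xFFU));

		/* 3. ... and only then move the vector from IRR to ISR,
		 *    which also raises the PPR update.
		 */
		vlapic_get_deliverable_intr(vlapic, vector);
		injected = 1;
	}

	return injected;
}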


@ -94,10 +94,10 @@ an interrupt, for example:
These APIs will finish by making a request for *ACRN_REQUEST_EVENT.*
-.. doxygenfunction:: vlapic_intr_accepted
+.. doxygenfunction:: vlapic_get_deliverable_intr
:project: Project ACRN
-.. doxygenfunction:: vlapic_pending_intr
+.. doxygenfunction:: vlapic_find_deliverable_intr
:project: Project ACRN
.. doxygenfunction:: vlapic_set_local_intr


@ -84,7 +84,7 @@ static void ptirq_build_physical_msi(struct acrn_vm *vm, struct ptirq_msi_info *
dest = info->vmsi_addr.bits.dest_field;
phys = (info->vmsi_addr.bits.dest_mode == MSI_ADDR_DESTMODE_PHYS);
-vlapic_calcdest(vm, &vdmask, dest, phys, false);
+vlapic_calc_dest(vm, &vdmask, dest, phys, false);
pdmask = vcpumask2pcpumask(vm, vdmask);
/* get physical delivery mode */
@ -178,7 +178,7 @@ ptirq_build_physical_rte(struct acrn_vm *vm, struct ptirq_remapping_info *entry)
/* physical destination cpu mask */
phys = (virt_rte.bits.dest_mode == IOAPIC_RTE_DESTMODE_PHY);
dest = (uint32_t)virt_rte.bits.dest_field;
-vlapic_calcdest(vm, &vdmask, dest, phys, false);
+vlapic_calc_dest(vm, &vdmask, dest, phys, false);
pdmask = vcpumask2pcpumask(vm, vdmask);
/* physical delivery mode */


@ -77,16 +77,12 @@ static bool vcpu_pending_request(struct acrn_vcpu *vcpu)
{
struct acrn_vlapic *vlapic;
uint32_t vector = 0U;
-int32_t ret = 0;
/* Query vLapic to get vector to inject */
vlapic = vcpu_vlapic(vcpu);
-ret = vlapic_pending_intr(vlapic, &vector);
-/* we need to check and raise request if we have pending event
- * in LAPIC IRR
- */
-if (ret != 0) {
+/* check and raise request if we have a deliverable irq in LAPIC IRR */
+if (vlapic_find_deliverable_intr(vlapic, &vector)) {
/* we have pending IRR */
vcpu_make_request(vcpu, ACRN_REQUEST_EVENT);
}
@ -124,8 +120,7 @@ static int32_t vcpu_inject_vlapic_int(struct acrn_vcpu *vcpu)
uint32_t vector = 0U;
int32_t ret = 0;
-ret = vlapic_pending_intr(vlapic, &vector);
-if (ret != 0) {
+if (vlapic_find_deliverable_intr(vlapic, &vector)) {
/*
* From the Intel SDM, Volume 3, 6.3.2 Section "Maskable
* Hardware Interrupts":
@ -140,7 +135,7 @@ static int32_t vcpu_inject_vlapic_int(struct acrn_vcpu *vcpu)
exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, VMX_INT_INFO_VALID |
(vector & 0xFFU));
-vlapic_intr_accepted(vlapic, vector);
+vlapic_get_deliverable_intr(vlapic, vector);
ret = 1;
}
}


@ -1026,7 +1026,7 @@ vlapic_trigger_lvt(struct acrn_vlapic *vlapic, uint32_t vector)
* addressing specified by the (dest, phys, lowprio) tuple.
*/
void
-vlapic_calcdest(struct acrn_vm *vm, uint64_t *dmask, uint32_t dest, bool phys, bool lowprio)
+vlapic_calc_dest(struct acrn_vm *vm, uint64_t *dmask, uint32_t dest, bool phys, bool lowprio)
{
struct acrn_vlapic *vlapic;
struct acrn_vlapic *target = NULL;
@ -1140,7 +1140,7 @@ vlapic_calcdest(struct acrn_vm *vm, uint64_t *dmask, uint32_t dest, bool phys, b
* when lapic is passthru.
*/
void
-vlapic_calcdest_lapic_pt(struct acrn_vm *vm, uint64_t *dmask, uint32_t dest, bool phys)
+vlapic_calc_dest_lapic_pt(struct acrn_vm *vm, uint64_t *dmask, uint32_t dest, bool phys)
{
struct acrn_vlapic *vlapic;
struct acrn_vcpu *vcpu;
@ -1319,7 +1319,7 @@ vlapic_icrlo_write_handler(struct acrn_vlapic *vlapic)
switch (shorthand) {
case APIC_DEST_DESTFLD:
-vlapic_calcdest(vlapic->vm, &dmask, dest, phys, false);
+vlapic_calc_dest(vlapic->vm, &dmask, dest, phys, false);
break;
case APIC_DEST_SELF:
bitmap_set_nolock(vlapic->vcpu->vcpu_id, &dmask);
@ -1369,26 +1369,26 @@ vlapic_icrlo_write_handler(struct acrn_vlapic *vlapic)
}
/**
-* @brief Get pending virtual interrupts for vLAPIC.
+* @brief Find a deliverable virtual interrupts for vLAPIC in irr.
*
* @param[in] vlapic Pointer to target vLAPIC data structure
* @param[inout] vecptr Pointer to vector buffer and will be filled
* with eligible vector if any.
*
-* @retval 0 There is no eligible pending vector.
-* @retval 1 There is pending vector.
+* @retval false There is no deliverable pending vector.
+* @retval true There is deliverable vector.
*
* @remark The vector does not automatically transition to the ISR as a
* result of calling this function.
* This function is only for case that APICv/VID is NOT supported.
*/
-int32_t
-vlapic_pending_intr(const struct acrn_vlapic *vlapic, uint32_t *vecptr)
+bool
+vlapic_find_deliverable_intr(const struct acrn_vlapic *vlapic, uint32_t *vecptr)
{
const struct lapic_regs *lapic = &(vlapic->apic_page);
uint32_t i, vector, val, bitpos;
const struct lapic_reg *irrptr;
-int32_t ret = 0;
+bool ret = false;
irrptr = &lapic->irr[0];
@ -1403,7 +1403,7 @@ vlapic_pending_intr(const struct acrn_vlapic *vlapic, uint32_t *vecptr)
if (vecptr != NULL) {
*vecptr = vector;
}
-ret = 1;
+ret = true;
}
break;
}
@ -1412,10 +1412,10 @@ vlapic_pending_intr(const struct acrn_vlapic *vlapic, uint32_t *vecptr)
}
/**
-* @brief Accept virtual interrupt.
+* @brief Get a deliverable virtual interrupt from irr to isr.
*
* Transition 'vector' from IRR to ISR. This function is called with the
-* vector returned by 'vlapic_pending_intr()' when the guest is able to
+* vector returned by 'vlapic_find_deliverable_intr()' when the guest is able to
* accept this interrupt (i.e. RFLAGS.IF = 1 and no conditions exist that
* block interrupt delivery).
*
@ -1427,7 +1427,7 @@ vlapic_pending_intr(const struct acrn_vlapic *vlapic, uint32_t *vecptr)
* @pre vlapic != NULL
*/
void
-vlapic_intr_accepted(struct acrn_vlapic *vlapic, uint32_t vector)
+vlapic_get_deliverable_intr(struct acrn_vlapic *vlapic, uint32_t vector)
{
struct lapic_regs *lapic = &(vlapic->apic_page);
struct lapic_reg *irrptr, *isrptr;
@ -1441,11 +1441,11 @@ vlapic_intr_accepted(struct acrn_vlapic *vlapic, uint32_t vector)
irrptr = &lapic->irr[0];
atomic_clear32(&irrptr[idx].v, 1U << (vector & 0x1fU));
-vlapic_dump_irr(vlapic, "vlapic_intr_accepted");
+vlapic_dump_irr(vlapic, "vlapic_get_deliverable_intr");
isrptr = &lapic->isr[0];
isrptr[idx].v |= 1U << (vector & 0x1fU);
-vlapic_dump_isr(vlapic, "vlapic_intr_accepted");
+vlapic_dump_isr(vlapic, "vlapic_get_deliverable_intr");
/*
* Update the PPR
@ -1842,7 +1842,7 @@ vlapic_set_apicbase(struct acrn_vlapic *vlapic, uint64_t new)
}
void
-vlapic_deliver_intr(struct acrn_vm *vm, bool level, uint32_t dest, bool phys,
+vlapic_receive_intr(struct acrn_vm *vm, bool level, uint32_t dest, bool phys,
uint32_t delmode, uint32_t vec, bool rh)
{
bool lowprio;
@ -1863,7 +1863,7 @@ vlapic_deliver_intr(struct acrn_vm *vm, bool level, uint32_t dest, bool phys,
* all interrupts originating from the ioapic or MSI specify the
* 'dest' in the legacy xAPIC format.
*/
-vlapic_calcdest(vm, &dmask, dest, phys, lowprio);
+vlapic_calc_dest(vm, &dmask, dest, phys, lowprio);
for (vcpu_id = 0U; vcpu_id < vm->hw.created_vcpus; vcpu_id++) {
struct acrn_vlapic *vlapic;
@ -2013,7 +2013,7 @@ vlapic_intr_msi(struct acrn_vm *vm, uint64_t addr, uint64_t msg)
dev_dbg(ACRN_DBG_LAPIC, "lapic MSI %s dest %#x, vec %u",
phys ? "physical" : "logical", dest, vec);
-vlapic_deliver_intr(vm, LAPIC_TRIG_EDGE, dest, phys, delmode, vec, rh);
+vlapic_receive_intr(vm, LAPIC_TRIG_EDGE, dest, phys, delmode, vec, rh);
ret = 0;
} else {
dev_dbg(ACRN_DBG_LAPIC, "lapic MSI invalid addr %#lx", address.full);


@ -400,7 +400,7 @@ static void inject_msi_lapic_pt(struct acrn_vm *vm, const struct acrn_msi_entry
* the delivery mode of vmsi will be forwarded to ICR delievry field
* and handled by hardware.
*/
-vlapic_calcdest_lapic_pt(vm, &vdmask, vdest, phys);
+vlapic_calc_dest_lapic_pt(vm, &vdmask, vdest, phys);
dev_dbg(ACRN_DBG_LAPICPT, "%s: vcpu destination mask 0x%016llx", __func__, vdmask);
vcpu_id = ffs64(vdmask);


@ -73,7 +73,7 @@ vioapic_generate_intr(struct acrn_vioapic *vioapic, uint32_t pin)
}
vector = rte.bits.vector;
dest = rte.bits.dest_field;
-vlapic_deliver_intr(vioapic->vm, level, dest, phys, delmode, vector, false);
+vlapic_receive_intr(vioapic->vm, level, dest, phys, delmode, vector, false);
}
}
}
@ -217,7 +217,7 @@ vioapic_update_eoi_exit(const struct acrn_vioapic *vioapic)
} else {
dest = (uint32_t)rte.bits.dest_field;
phys = (rte.bits.dest_mode == IOAPIC_RTE_DESTMODE_PHY);
-vlapic_calcdest(vioapic->vm, &mask, dest, phys, false);
+vlapic_calc_dest(vioapic->vm, &mask, dest, phys, false);
for (vcpu_id = ffs64(mask); vcpu_id != INVALID_BIT_INDEX; vcpu_id = ffs64(mask)) {
vcpu = vcpu_from_vid(vioapic->vm, vcpu_id);


@ -122,25 +122,25 @@ uint64_t vlapic_get_cr8(const struct acrn_vlapic *vlapic);
/**
-* @brief Get pending virtual interrupts for vLAPIC.
+* @brief Find a deliverable virtual interrupts for vLAPIC in irr.
*
* @param[in] vlapic Pointer to target vLAPIC data structure
* @param[inout] vecptr Pointer to vector buffer and will be filled
* with eligible vector if any.
*
-* @retval 0 There is no eligible pending vector.
-* @retval 1 There is pending vector.
+* @retval false There is no deliverable pending vector.
+* @retval true There is deliverable vector.
*
* @remark The vector does not automatically transition to the ISR as a
* result of calling this function.
*/
-int32_t vlapic_pending_intr(const struct acrn_vlapic *vlapic, uint32_t *vecptr);
+bool vlapic_find_deliverable_intr(const struct acrn_vlapic *vlapic, uint32_t *vecptr);
/**
-* @brief Accept virtual interrupt.
+* @brief Get a deliverable virtual interrupt from irr to isr.
*
* Transition 'vector' from IRR to ISR. This function is called with the
-* vector returned by 'vlapic_pending_intr()' when the guest is able to
+* vector returned by 'vlapic_find_deliverable_intr()' when the guest is able to
* accept this interrupt (i.e. RFLAGS.IF = 1 and no conditions exist that
* block interrupt delivery).
*
@ -151,7 +151,7 @@ int32_t vlapic_pending_intr(const struct acrn_vlapic *vlapic, uint32_t *vecptr);
*
* @pre vlapic != NULL
*/
-void vlapic_intr_accepted(struct acrn_vlapic *vlapic, uint32_t vector);
+void vlapic_get_deliverable_intr(struct acrn_vlapic *vlapic, uint32_t vector);
/**
* @brief Send notification vector to target pCPU.
@ -224,7 +224,7 @@ int32_t vlapic_set_local_intr(struct acrn_vm *vm, uint16_t vcpu_id_arg, uint32_t
*/
int32_t vlapic_intr_msi(struct acrn_vm *vm, uint64_t addr, uint64_t msg);
-void vlapic_deliver_intr(struct acrn_vm *vm, bool level, uint32_t dest,
+void vlapic_receive_intr(struct acrn_vm *vm, bool level, uint32_t dest,
bool phys, uint32_t delmode, uint32_t vec, bool rh);
uint32_t vlapic_get_apicid(const struct acrn_vlapic *vlapic);
@ -248,8 +248,8 @@ int32_t apic_access_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t apic_write_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t veoi_vmexit_handler(struct acrn_vcpu *vcpu);
int32_t tpr_below_threshold_vmexit_handler(__unused struct acrn_vcpu *vcpu);
-void vlapic_calcdest(struct acrn_vm *vm, uint64_t *dmask, uint32_t dest, bool phys, bool lowprio);
-void vlapic_calcdest_lapic_pt(struct acrn_vm *vm, uint64_t *dmask, uint32_t dest, bool phys);
+void vlapic_calc_dest(struct acrn_vm *vm, uint64_t *dmask, uint32_t dest, bool phys, bool lowprio);
+void vlapic_calc_dest_lapic_pt(struct acrn_vm *vm, uint64_t *dmask, uint32_t dest, bool phys);
/**
* @}
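
As a footnote on the "deliverable" test documented above for vlapic_find_deliverable_intr(): under the xAPIC rules, a vector pending in the IRR can be delivered only when its priority class (bits 7:4 of the vector) is strictly higher than the class held in the PPR. A standalone illustration follows; it is not ACRN code and the helper name is made up.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: a pending vector is deliverable when its priority
 * class (vector[7:4]) is strictly greater than the PPR's priority class.
 */
static bool vector_beats_ppr(uint32_t vector, uint32_t ppr)
{
	return (vector >> 4U) > (ppr >> 4U);
}

int main(void)
{
	/* With PPR = 0x30, vector 0x35 shares priority class 3 and stays
	 * pending, while vector 0x41 is in class 4 and is deliverable.
	 */
	printf("0x35 vs PPR 0x30 -> %d\n", vector_beats_ppr(0x35U, 0x30U)); /* prints 0 */
	printf("0x41 vs PPR 0x30 -> %d\n", vector_beats_ppr(0x41U, 0x30U)); /* prints 1 */
	return 0;
}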