hv: vmcs: simplify update EOI-exit bitmap

1) The previous implementation recalculated the whole EOI-exit bitmap and
updated it on every vcpu of the VM whenever the destination, trigger mode,
delivery mode or vector of an RTE changed.
With this patch, the corresponding bit of a vcpu's EOI-exit bitmap is set
when a level-triggered interrupt is accepted into the IRR, and cleared when
an edge-triggered interrupt is accepted into the IRR. In other words, only
one bit of one vcpu's EOI-exit bitmap is touched per TMR update.
2) Rename the eoi_exit related APIs to their eoi_exit_bitmap counterparts.

Tracked-On: #1842
Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Authored by Li, Fei1 on 2019-02-13 23:41:40 +08:00; committed by wenlingz
parent 501b3f7e82
commit 2e60adef7c
5 changed files with 47 additions and 90 deletions
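To make the new flow concrete before the diffs below: each vcpu keeps a 256-bit EOI-exit bitmap (four 64-bit words), and the patch now flips exactly one bit per TMR transition, posting an update request only when that bit actually changes. The following is a minimal, self-contained sketch of that idea; struct toy_vcpu and the toy_* helpers are illustrative stand-ins, not ACRN code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the ACRN structures touched by this patch. */
struct toy_vcpu {
	uint64_t eoi_exit_bitmap[4];   /* one bit per vector 0..255 */
	bool     eoi_exit_update_req;  /* models ACRN_REQUEST_EOI_EXIT_BITMAP_UPDATE */
};

/* Set the EOI-exit bit for a vector; request a VMCS refresh only on change. */
static void toy_set_eoi_exit_bitmap(struct toy_vcpu *vcpu, uint32_t vector)
{
	uint64_t *word = &vcpu->eoi_exit_bitmap[(vector & 0xffU) >> 6U];
	uint64_t mask = 1ULL << (vector & 0x3fU);

	if ((*word & mask) == 0ULL) {
		*word |= mask;
		vcpu->eoi_exit_update_req = true;
	}
}

/* Clear the EOI-exit bit for a vector; request a VMCS refresh only on change. */
static void toy_clear_eoi_exit_bitmap(struct toy_vcpu *vcpu, uint32_t vector)
{
	uint64_t *word = &vcpu->eoi_exit_bitmap[(vector & 0xffU) >> 6U];
	uint64_t mask = 1ULL << (vector & 0x3fU);

	if ((*word & mask) != 0ULL) {
		*word &= ~mask;
		vcpu->eoi_exit_update_req = true;
	}
}

int main(void)
{
	struct toy_vcpu vcpu = { { 0 }, false };

	toy_set_eoi_exit_bitmap(&vcpu, 0x30U);   /* level-triggered vector accepted */
	toy_clear_eoi_exit_bitmap(&vcpu, 0x30U); /* same vector later arrives edge-triggered */

	printf("update requested: %d\n", vcpu.eoi_exit_update_req ? 1 : 0);
	return 0;
}

Posting the request only on a real 0-to-1 or 1-to-0 transition is the same idea the patch expresses with bitmap_test_and_set_nolock()/bitmap_test_and_clear_nolock(): redundant VMCS rewrites stay out of the interrupt hot path.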

@@ -149,13 +149,23 @@ void vcpu_set_vmcs_eoi_exit(struct acrn_vcpu *vcpu)
* called with vcpu->arch.lock held
* @pre vcpu != NULL && vector <= 255U
*/
void vcpu_set_eoi_exit(struct acrn_vcpu *vcpu, uint32_t vector)
void vcpu_set_eoi_exit_bitmap(struct acrn_vcpu *vcpu, uint32_t vector)
{
pr_dbg("%s", __func__);
if (bitmap_test_and_set_nolock((uint16_t)(vector & 0x3fU),
if (!bitmap_test_and_set_nolock((uint16_t)(vector & 0x3fU),
&(vcpu->arch.eoi_exit_bitmap[(vector & 0xffU) >> 6U]))) {
pr_warn("Duplicated vector %u vcpu%u", vector, vcpu->vcpu_id);
vcpu_make_request(vcpu, ACRN_REQUEST_EOI_EXIT_BITMAP_UPDATE);
}
}
void vcpu_clear_eoi_exit_bitmap(struct acrn_vcpu *vcpu, uint32_t vector)
{
pr_dbg("%s", __func__);
if (bitmap_test_and_clear_nolock((uint16_t)(vector & 0x3fU),
&(vcpu->arch.eoi_exit_bitmap[(vector & 0xffU) >> 6U]))) {
vcpu_make_request(vcpu, ACRN_REQUEST_EOI_EXIT_BITMAP_UPDATE);
}
}
@@ -163,11 +173,12 @@ void vcpu_set_eoi_exit(struct acrn_vcpu *vcpu, uint32_t vector)
* Reset all eoi_exit_bitmaps
* called with vcpu->arch.lock held
*/
void vcpu_reset_eoi_exit_all(struct acrn_vcpu *vcpu)
void vcpu_reset_eoi_exit_bitmaps(struct acrn_vcpu *vcpu)
{
pr_dbg("%s", __func__);
memset((void *)(vcpu->arch.eoi_exit_bitmap), 0U, sizeof(vcpu->arch.eoi_exit_bitmap));
vcpu_make_request(vcpu, ACRN_REQUEST_EOI_EXIT_BITMAP_UPDATE);
}
struct acrn_vcpu *get_ever_run_vcpu(uint16_t pcpu_id)

@@ -454,7 +454,7 @@ int32_t acrn_handle_pending_request(struct acrn_vcpu *vcpu)
flush_vpid_single(arch->vpid);
}
if (bitmap_test_and_clear_lock(ACRN_REQUEST_EOI_EXIT_UPDATE, pending_req_bits)) {
if (bitmap_test_and_clear_lock(ACRN_REQUEST_EOI_EXIT_BITMAP_UPDATE, pending_req_bits)) {
vcpu_set_vmcs_eoi_exit(vcpu);
}
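The hunk above only consumes the renamed request bit. For orientation, here is a minimal, self-contained model of that request protocol: producer paths post a per-vcpu request bit, and the VM-entry path clears it atomically so the VMCS copy is refreshed at most once per entry. The names below (toy_make_request, TOY_REQUEST_EOI_EXIT_BITMAP_UPDATE) are illustrative stand-ins, not the ACRN API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the per-vcpu pending-request word. */
#define TOY_REQUEST_EOI_EXIT_BITMAP_UPDATE 4U

static _Atomic uint64_t pending_req_bits;

/* Producer side: post a request (models vcpu_make_request()). */
static void toy_make_request(uint16_t req)
{
	atomic_fetch_or(&pending_req_bits, 1ULL << req);
}

/* Consumer side: atomically test-and-clear one request bit
 * (models bitmap_test_and_clear_lock() in the VM-entry path). */
static bool toy_test_and_clear_request(uint16_t req)
{
	uint64_t mask = 1ULL << req;

	return (atomic_fetch_and(&pending_req_bits, ~mask) & mask) != 0ULL;
}

int main(void)
{
	/* vcpu_set_eoi_exit_bitmap()/vcpu_clear_eoi_exit_bitmap() would post the request... */
	toy_make_request(TOY_REQUEST_EOI_EXIT_BITMAP_UPDATE);

	/* ...and the pending-request handler consumes it once before VM entry. */
	if (toy_test_and_clear_request(TOY_REQUEST_EOI_EXIT_BITMAP_UPDATE)) {
		printf("rewrite the VMCS EOI-exit fields from eoi_exit_bitmap[]\n");
	}
	return 0;
}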

@@ -466,9 +466,13 @@ vlapic_set_tmr(struct acrn_vlapic *vlapic, uint32_t vector, bool level)
lapic = &(vlapic->apic_page);
tmrptr = &lapic->tmr[0];
if (level) {
bitmap32_set_lock((uint16_t)(vector & 0x1fU), &tmrptr[vector >> 5U].v);
if (!bitmap32_test_and_set_lock((uint16_t)(vector & 0x1fU), &tmrptr[vector >> 5U].v)) {
vcpu_set_eoi_exit_bitmap(vlapic->vcpu, vector);
}
} else {
bitmap32_clear_lock((uint16_t)(vector & 0x1fU), &tmrptr[vector >> 5U].v);
if (bitmap32_test_and_clear_lock((uint16_t)(vector & 0x1fU), &tmrptr[vector >> 5U].v)) {
vcpu_clear_eoi_exit_bitmap(vlapic->vcpu, vector);
}
}
}
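A side note on the index arithmetic used above: the vLAPIC TMR is modelled as eight 32-bit registers, so vector >> 5 selects the register and vector & 0x1f the bit, while the per-vcpu EOI-exit bitmap uses four 64-bit words (vector >> 6 and vector & 0x3f). A tiny self-contained check of that mapping (illustrative only):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vector = 0x53U; /* any vector in 0..255 */

	/* TMR side: eight 32-bit registers. */
	uint32_t tmr_reg = vector >> 5U;    /* which lapic->tmr[0..7] register  */
	uint32_t tmr_bit = vector & 0x1fU;  /* which bit inside that register   */

	/* EOI-exit bitmap side: four 64-bit words. */
	uint32_t eoi_word = (vector & 0xffU) >> 6U; /* which eoi_exit_bitmap[0..3] word */
	uint32_t eoi_bit  = vector & 0x3fU;         /* which bit inside that word       */

	/* Both decompositions must reconstruct the same vector. */
	assert(tmr_reg * 32U + tmr_bit == vector);
	assert(eoi_word * 64U + eoi_bit == vector);

	printf("vector 0x%02x -> tmr[%u] bit %u, eoi_exit_bitmap[%u] bit %u\n",
	       vector, tmr_reg, tmr_bit, eoi_word, eoi_bit);
	return 0;
}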
@@ -485,6 +489,8 @@ vlapic_reset_tmr(struct acrn_vlapic *vlapic)
for (i = 0; i < 8; i++) {
lapic->tmr[i].v = 0U;
}
vcpu_reset_eoi_exit_bitmaps(vlapic->vcpu);
}
/*

@@ -175,66 +175,6 @@ vioapic_set_irqline_lock(const struct acrn_vm *vm, uint32_t irqline, uint32_t op
spinlock_release(&(vioapic->mtx));
}
/*
* Generate eoi_exit_bitmap and request each VCPU to update VMCS fields
* To be called with vioapic->mtx
* @pre vioapic != NULL
*/
static void
vioapic_update_eoi_exit(const struct acrn_vioapic *vioapic)
{
struct acrn_vcpu *vcpu;
union ioapic_rte rte;
uint64_t mask;
uint32_t vector, delmode, dest;
uint32_t pin, pincount;
uint16_t vcpu_id;
bool level, phys;
dev_dbg(ACRN_DBG_IOAPIC, "%s", __func__);
/* clear old bitmap to generate new bitmap */
foreach_vcpu(vcpu_id, vioapic->vm, vcpu) {
spinlock_obtain(&(vcpu->arch.lock));
vcpu_reset_eoi_exit_all(vcpu);
}
/* go through RTEs and set corresponding bits of eoi_exit_bitmap */
pincount = vioapic_pincount(vioapic->vm);
for (pin = 0U; pin < pincount; pin++) {
rte = vioapic->rtbl[pin];
level = (rte.bits.trigger_mode == IOAPIC_RTE_TRGRMODE_LEVEL);
vector = rte.bits.vector;
if (level && ((vector >= 0x20U) && (vector < NR_MAX_VECTOR))) {
/* if level-trigger and vector is valid */
delmode = (uint32_t)rte.bits.delivery_mode;
if ((delmode != IOAPIC_RTE_DELMODE_FIXED) && (delmode != IOAPIC_RTE_DELMODE_LOPRI)) {
dev_dbg(ACRN_DBG_IOAPIC,
"Ignoring level trigger-mode for delivery-mode 0x%x", delmode);
} else {
dest = (uint32_t)rte.bits.dest_field;
phys = (rte.bits.dest_mode == IOAPIC_RTE_DESTMODE_PHY);
vlapic_calc_dest(vioapic->vm, &mask, dest, phys, false);
for (vcpu_id = ffs64(mask); vcpu_id != INVALID_BIT_INDEX; vcpu_id = ffs64(mask)) {
vcpu = vcpu_from_vid(vioapic->vm, vcpu_id);
vcpu_set_eoi_exit(vcpu, vector);
bitmap_clear_nolock(vcpu_id, &mask);
}
}
}
}
/* make request if eoi_exit_bitmap changed */
foreach_vcpu(vcpu_id, vioapic->vm, vcpu) {
spinlock_release(&(vcpu->arch.lock));
vcpu_make_request(vcpu, ACRN_REQUEST_EOI_EXIT_UPDATE);
}
}
static uint32_t
vioapic_indirect_read(const struct acrn_vioapic *vioapic, uint32_t addr)
{
@@ -377,17 +317,6 @@ static void vioapic_indirect_write(struct acrn_vioapic *vioapic, uint32_t addr,
vioapic->rtbl[pin] = new;
dev_dbg(ACRN_DBG_IOAPIC, "ioapic pin%hhu: redir table entry %#lx",
pin, vioapic->rtbl[pin].full);
/*
* If "Trigger Mode" or "Delivery Mode" or "Vector"
* in the redirection table entry have changed then
* rendezvous all the vcpus to update their vlapic
* trigger-mode registers.
*/
if ((changed.bits.vector != 0UL) || (changed.bits.delivery_mode != 0UL)
|| (changed.bits.trigger_mode != 0UL) || (changed.bits.dest_field != 0UL)) {
dev_dbg(ACRN_DBG_IOAPIC, "ioapic pin%hhu: recalculate vlapic trigger-mode reg", pin);
vioapic_update_eoi_exit(vioapic);
}
/*
* Generate an interrupt if the following conditions are met:

@@ -71,14 +71,14 @@
/*
* VCPU related APIs
*/
#define ACRN_REQUEST_EXCP 0U
#define ACRN_REQUEST_EVENT 1U
#define ACRN_REQUEST_EXTINT 2U
#define ACRN_REQUEST_NMI 3U
#define ACRN_REQUEST_EOI_EXIT_UPDATE 4U
#define ACRN_REQUEST_EPT_FLUSH 5U
#define ACRN_REQUEST_TRP_FAULT 6U
#define ACRN_REQUEST_VPID_FLUSH 7U /* flush vpid tlb */
#define ACRN_REQUEST_EXCP 0U
#define ACRN_REQUEST_EVENT 1U
#define ACRN_REQUEST_EXTINT 2U
#define ACRN_REQUEST_NMI 3U
#define ACRN_REQUEST_EOI_EXIT_BITMAP_UPDATE 4U
#define ACRN_REQUEST_EPT_FLUSH 5U
#define ACRN_REQUEST_TRP_FAULT 6U
#define ACRN_REQUEST_VPID_FLUSH 7U /* flush vpid tlb */
#define save_segment(seg, SEG_NAME) \
{ \
@@ -503,14 +503,14 @@ void vcpu_set_guest_msr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t val);
void vcpu_set_vmcs_eoi_exit(struct acrn_vcpu *vcpu);
/**
* @brief reset eoi_exit_bitmap
* @brief reset all eoi_exit_bitmaps
*
* @param[in] vcpu pointer to vcpu data structure
*
* @return void
*/
void vcpu_reset_eoi_exit_all(struct acrn_vcpu *vcpu);
void vcpu_reset_eoi_exit_bitmaps(struct acrn_vcpu *vcpu);
/**
* @brief set eoi_exit_bitmap bit
@@ -520,9 +520,20 @@ void vcpu_reset_eoi_exit_all(struct acrn_vcpu *vcpu);
* @param[in] vcpu pointer to vcpu data structure
* @param[in] vector
*
* @return void
* @return void
*/
void vcpu_set_eoi_exit(struct acrn_vcpu *vcpu, uint32_t vector);
void vcpu_set_eoi_exit_bitmap(struct acrn_vcpu *vcpu, uint32_t vector);
/**
* @brief clear eoi_exit_bitmap bit
*
* Clear corresponding bit of vector in eoi_exit_bitmap
*
* @param[in] vcpu pointer to vcpu data structure
* @param[in] vector
*
* @return void
*/
void vcpu_clear_eoi_exit_bitmap(struct acrn_vcpu *vcpu, uint32_t vector);
/**
* @brief set all the vcpu registers
*