hv: ptdev: move softirq_dev_entry_list from vm structure to per_cpu region

Using a per_cpu list to record ptdev interrupts is more reasonable than
recording them per-VM. It also makes dispatching such interrupts easier,
since the dispatch now happens in the softirq that runs right after the
interrupt context on each pcpu.

Tracked-On: #3663
Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
Signed-off-by: Shuo A Liu <shuo.a.liu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Shuo A Liu 2019-09-03 11:50:01 +08:00 committed by wenlingz
parent 2cc45534d6
commit 4742d1c747
7 changed files with 18 additions and 22 deletions
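For orientation, here is what the commit message describes, as a minimal self-contained C sketch: each pcpu owns its own pending list, the interrupt path enqueues onto the list of the pcpu that took the interrupt, and the softirq that follows on that same pcpu drains it. All names below (fake_entry, pending, enqueue_on_cpu, drain_cpu) are invented for illustration; this is not ACRN code, and it omits the interrupt masking the real enqueue/dequeue paths use.

/* Toy model of a per-CPU pending list; not ACRN code. */
#include <stdio.h>
#include <stddef.h>

#define NUM_CPUS 4U

struct fake_entry {
	unsigned int irq;          /* pretend interrupt source */
	struct fake_entry *next;   /* intrusive singly-linked pending list */
};

/* one pending list per pcpu, analogous to per_cpu(softirq_dev_entry_list, pcpu_id) */
static struct fake_entry *pending[NUM_CPUS];

/* interrupt context: record the entry on the CPU that took the interrupt
 * (LIFO here for brevity; the real code appends at the tail to keep order) */
static void enqueue_on_cpu(unsigned int cpu, struct fake_entry *e)
{
	e->next = pending[cpu];
	pending[cpu] = e;
}

/* softirq context on the same CPU: drain only the local list */
static void drain_cpu(unsigned int cpu)
{
	while (pending[cpu] != NULL) {
		struct fake_entry *e = pending[cpu];

		pending[cpu] = e->next;
		printf("cpu%u dispatches irq %u\n", cpu, e->irq);
	}
}

int main(void)
{
	struct fake_entry a = { .irq = 33U }, b = { .irq = 34U };

	enqueue_on_cpu(1U, &a);   /* pcpu1 took both interrupts */
	enqueue_on_cpu(1U, &b);
	drain_cpu(1U);            /* pcpu1's softirq drains them */
	return 0;
}

Because producer and consumer are the same pcpu, no cross-CPU lock is needed; in the hunks below the real code only masks interrupts around the list operations (CPU_INT_ALL_DISABLE/CPU_INT_ALL_RESTORE) instead of taking the old per-VM spinlock.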


@@ -248,6 +248,7 @@ void init_pcpu_post(uint16_t pcpu_id)
 	init_interrupt(pcpu_id);
 	timer_init();
+	ptdev_init();
 	/* Wait for boot processor to signal all secondary cores to continue */
 	wait_sync_change(&pcpu_sync, 0UL);


@@ -520,11 +520,8 @@ static void ptirq_handle_intx(struct acrn_vm *vm,
 void ptirq_softirq(uint16_t pcpu_id)
 {
-	struct acrn_vcpu *vcpu = per_cpu(vcpu, pcpu_id);
-	struct acrn_vm *vm = vcpu->vm;
 	while (1) {
-		struct ptirq_remapping_info *entry = ptirq_dequeue_softirq(vm);
+		struct ptirq_remapping_info *entry = ptirq_dequeue_softirq(pcpu_id);
 		struct ptirq_msi_info *msi;
 		if (entry == NULL) {
@@ -541,11 +538,11 @@ void ptirq_softirq(uint16_t pcpu_id)
 		/* handle real request */
 		if (entry->intr_type == PTDEV_INTR_INTX) {
-			ptirq_handle_intx(vm, entry);
+			ptirq_handle_intx(entry->vm, entry);
 		} else {
 			if (msi != NULL) {
 				/* TODO: msi destmode check required */
-				(void)vlapic_intr_msi(vm, msi->vmsi_addr.full, msi->vmsi_data.full);
+				(void)vlapic_intr_msi(entry->vm, msi->vmsi_addr.full, msi->vmsi_data.full);
 				dev_dbg(ACRN_DBG_PTIRQ, "dev-assign: irq=0x%x MSI VR: 0x%x-0x%x",
 					entry->allocated_pirq,
 					msi->vmsi_data.bits.vector,
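The practical consequence of this hunk: the servicing pcpu no longer implies a target VM. Each dequeued entry carries its owner in entry->vm, so a single softirq pass on one pcpu can inject into several different VMs, which is why the per_cpu(vcpu, pcpu_id)-derived vm pointer could be dropped above. A tiny self-contained illustration of that ownership model, with invented names (fake_vm, fake_entry, dispatch_all); not ACRN code:

/* Toy model: the entry carries its owning VM; not ACRN code. */
#include <stdio.h>
#include <stddef.h>

struct fake_vm {
	const char *name;
};

struct fake_entry {
	struct fake_vm *vm;        /* owner travels with the entry, like entry->vm */
	unsigned int vector;
	struct fake_entry *next;
};

/* a softirq pass on one pcpu may dispatch entries owned by different VMs */
static void dispatch_all(const struct fake_entry *head)
{
	const struct fake_entry *e;

	for (e = head; e != NULL; e = e->next) {
		printf("inject vector 0x%x into %s\n", e->vector, e->vm->name);
	}
}

int main(void)
{
	struct fake_vm sos = { "SOS" }, uos = { "UOS" };
	struct fake_entry b = { &uos, 0x51U, NULL };
	struct fake_entry a = { &sos, 0x40U, &b };

	dispatch_all(&a);   /* one pcpu, two owning VMs */
	return 0;
}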


@@ -478,8 +478,6 @@ int32_t create_vm(uint16_t vm_id, struct acrn_vm_config *vm_config, struct acrn_
 	if (status == 0) {
 		prepare_epc_vm_memmap(vm);
-		INIT_LIST_HEAD(&vm->softirq_dev_entry_list);
-		spinlock_init(&vm->softirq_dev_lock);
 		spinlock_init(&vm->vm_lock);
 		vm->arch_vm.vlapic_state = VM_VLAPIC_XAPIC;


@@ -4,6 +4,7 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
+#include <per_cpu.h>
 #include <vm.h>
 #include <softirq.h>
 #include <ptdev.h>
@@ -34,13 +35,13 @@ static void ptirq_enqueue_softirq(struct ptirq_remapping_info *entry)
 	uint64_t rflags;
 	/* enqueue request in order, SOFTIRQ_PTDEV will pickup */
-	spinlock_irqsave_obtain(&entry->vm->softirq_dev_lock, &rflags);
+	CPU_INT_ALL_DISABLE(&rflags);
 	/* avoid adding recursively */
 	list_del(&entry->softirq_node);
 	/* TODO: assert if entry already in list */
-	list_add_tail(&entry->softirq_node, &entry->vm->softirq_dev_entry_list);
-	spinlock_irqrestore_release(&entry->vm->softirq_dev_lock, rflags);
+	list_add_tail(&entry->softirq_node, &get_cpu_var(softirq_dev_entry_list));
+	CPU_INT_ALL_RESTORE(rflags);
 	fire_softirq(SOFTIRQ_PTDEV);
 }
@@ -51,15 +52,15 @@ static void ptirq_intr_delay_callback(void *data)
 	ptirq_enqueue_softirq(entry);
 }
-struct ptirq_remapping_info *ptirq_dequeue_softirq(struct acrn_vm *vm)
+struct ptirq_remapping_info *ptirq_dequeue_softirq(uint16_t pcpu_id)
 {
 	uint64_t rflags;
 	struct ptirq_remapping_info *entry = NULL;
-	spinlock_irqsave_obtain(&vm->softirq_dev_lock, &rflags);
+	CPU_INT_ALL_DISABLE(&rflags);
-	while (!list_empty(&vm->softirq_dev_entry_list)) {
-		entry = get_first_item(&vm->softirq_dev_entry_list, struct ptirq_remapping_info, softirq_node);
+	while (!list_empty(&get_cpu_var(softirq_dev_entry_list))) {
+		entry = get_first_item(&per_cpu(softirq_dev_entry_list, pcpu_id), struct ptirq_remapping_info, softirq_node);
 		list_del_init(&entry->softirq_node);
@@ -73,7 +74,7 @@ struct ptirq_remapping_info *ptirq_dequeue_softirq(struct acrn_vm *vm)
 		}
 	}
-	spinlock_irqrestore_release(&vm->softirq_dev_lock, rflags);
+	CPU_INT_ALL_RESTORE(rflags);
 	return entry;
 }
@@ -106,10 +107,10 @@ void ptirq_release_entry(struct ptirq_remapping_info *entry)
 {
 	uint64_t rflags;
-	spinlock_irqsave_obtain(&entry->vm->softirq_dev_lock, &rflags);
+	CPU_INT_ALL_DISABLE(&rflags);
 	list_del_init(&entry->softirq_node);
 	del_timer(&entry->intr_delay_timer);
-	spinlock_irqrestore_release(&entry->vm->softirq_dev_lock, rflags);
+	CPU_INT_ALL_RESTORE(rflags);
 	bitmap_clear_nolock((entry->ptdev_entry_id) & 0x3FU,
 		&ptirq_entry_bitmaps[((entry->ptdev_entry_id) & 0x3FU) >> 6U]);
@@ -176,10 +177,10 @@ void ptirq_deactivate_entry(struct ptirq_remapping_info *entry)
 void ptdev_init(void)
 {
+	if (get_pcpu_id() == BOOT_CPU_ID) {
+		spinlock_init(&ptdev_lock);
+		register_softirq(SOFTIRQ_PTDEV, ptirq_softirq);
+	}
+	INIT_LIST_HEAD(&get_cpu_var(softirq_dev_entry_list));
 }

 void ptdev_release_all_entries(const struct acrn_vm *vm)
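The new shape of ptdev_init() pairs with the init_pcpu_post() hunk in the first file: every pcpu calls ptdev_init() during its own bring-up, the boot pcpu additionally does the one-time global setup (ptdev_lock init and SOFTIRQ_PTDEV registration), and each pcpu initializes only its own list head. A minimal self-contained sketch of that split; fake_ptdev_init, FAKE_BOOT_CPU, global_ready and local_list are invented stand-ins, not the real symbols:

/* Toy model of "global init once, per-CPU init everywhere"; not ACRN code. */
#include <stdio.h>
#include <stdbool.h>

#define FAKE_BOOT_CPU 0U
#define FAKE_NUM_CPUS 4U

static bool global_ready;               /* stands in for ptdev_lock + softirq registration */
static int local_list[FAKE_NUM_CPUS];   /* stands in for each pcpu's softirq_dev_entry_list */

static void fake_ptdev_init(unsigned int cpu_id)
{
	if (cpu_id == FAKE_BOOT_CPU) {
		global_ready = true;        /* done exactly once, on the boot pcpu */
	}
	local_list[cpu_id] = 0;             /* every pcpu initializes its own list head */
}

int main(void)
{
	unsigned int cpu;

	for (cpu = 0U; cpu < FAKE_NUM_CPUS; cpu++) {
		fake_ptdev_init(cpu);       /* mirrors init_pcpu_post() calling ptdev_init() */
	}
	printf("global_ready=%d\n", (int)global_ready);
	return 0;
}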


@@ -143,8 +143,6 @@ struct acrn_vm {
 	uint8_t vrtc_offset;
-	spinlock_t softirq_dev_lock;
-	struct list_head softirq_dev_entry_list;
 	uint64_t intr_inject_delay_delta; /* delay of intr injection */
 } __aligned(PAGE_SIZE);


@@ -51,6 +51,7 @@ struct per_cpu_region
 	uint32_t lapic_ldr;
 	uint32_t softirq_servicing;
 	struct smp_call_info_data smp_call_info;
+	struct list_head softirq_dev_entry_list;
 #ifdef PROFILING_ON
 	struct profiling_info_wrapper profiling_info;
 #endif
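This new field is reached through the two accessors already visible in the earlier hunks: get_cpu_var(softirq_dev_entry_list) on the enqueue and init paths (resolved against the calling pcpu) and per_cpu(softirq_dev_entry_list, pcpu_id) on the dequeue path (explicit pcpu). The mock below only illustrates that apparent distinction; the struct, macro bodies, and mock_current_cpu are simplified stand-ins, not the real ACRN definitions:

/* Simplified stand-ins for the per-cpu accessors; not the real definitions. */
#include <stdio.h>

#define MOCK_NUM_CPUS 2U

struct mock_per_cpu_region {
	int softirq_dev_entry_list;   /* stands in for the real struct list_head */
};

static struct mock_per_cpu_region mock_pcpu_data[MOCK_NUM_CPUS];
static unsigned int mock_current_cpu;   /* the real code derives this from the running CPU */

#define per_cpu(field, id)  (mock_pcpu_data[(id)].field)
#define get_cpu_var(field)  per_cpu(field, mock_current_cpu)

int main(void)
{
	mock_current_cpu = 1U;
	get_cpu_var(softirq_dev_entry_list) = 42;                  /* current pcpu's copy */
	printf("%d\n", per_cpu(softirq_dev_entry_list, 1U));       /* same slot, explicit id */
	return 0;
}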


@@ -154,7 +154,7 @@ void ptirq_softirq(uint16_t pcpu_id);
 void ptdev_init(void);
 void ptdev_release_all_entries(const struct acrn_vm *vm);
-struct ptirq_remapping_info *ptirq_dequeue_softirq(struct acrn_vm *vm);
+struct ptirq_remapping_info *ptirq_dequeue_softirq(uint16_t pcpu_id);
 struct ptirq_remapping_info *ptirq_alloc_entry(struct acrn_vm *vm, uint32_t intr_type);
 void ptirq_release_entry(struct ptirq_remapping_info *entry);
 int32_t ptirq_activate_entry(struct ptirq_remapping_info *entry, uint32_t phys_irq);