HV: improve pass-thru device interrupt process

Each pass-thru device and its entry are owned by exactly one VM, so
change the pass-thru device's softirq lock and entry list to be
per-VM. This way, one VM's PT device interrupt processing will not
interfere with other VMs; in particular, if an "interrupt storm"
happens in one user OS, it will have little effect on the
service OS.

Tracked-On: #866
Signed-off-by: Minggui Cao <minggui.cao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Minggui Cao 2018-09-05 14:07:09 +08:00 committed by lijinxia
parent b4e03f2d68
commit 99285f844a
5 changed files with 30 additions and 28 deletions

View File

@ -394,26 +394,27 @@ static void ptdev_intr_handle_irq(struct vm *vm,
} }
} }
void ptdev_softirq(__unused uint16_t cpu_id) void ptdev_softirq(uint16_t pcpu_id)
{ {
struct vcpu *vcpu = (struct vcpu *)per_cpu(vcpu, pcpu_id);
struct vm *vm = vcpu->vm;
while (1) { while (1) {
struct ptdev_remapping_info *entry = ptdev_dequeue_softirq(); struct ptdev_remapping_info *entry = ptdev_dequeue_softirq(vm);
struct ptdev_msi_info *msi = &entry->msi; struct ptdev_msi_info *msi;
struct vm *vm;
if (entry == NULL) { if (entry == NULL) {
break; break;
} }
msi = &entry->msi;
/* skip any inactive entry */ /* skip any inactive entry */
if (!is_entry_active(entry)) { if (!is_entry_active(entry)) {
/* service next item */ /* service next item */
continue; continue;
} }
/* TBD: need valid vm */
vm = entry->vm;
/* handle real request */ /* handle real request */
if (entry->intr_type == PTDEV_INTR_INTX) { if (entry->intr_type == PTDEV_INTR_INTX) {
ptdev_intr_handle_irq(vm, entry); ptdev_intr_handle_irq(vm, entry);

View File

@ -185,6 +185,9 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
list_add(&vm->list, &vm_list); list_add(&vm->list, &vm_list);
spinlock_release(&vm_list_lock); spinlock_release(&vm_list_lock);
INIT_LIST_HEAD(&vm->softirq_dev_entry_list);
spinlock_init(&vm->softirq_dev_lock);
/* Set up IO bit-mask such that VM exit occurs on /* Set up IO bit-mask such that VM exit occurs on
* selected IO ranges * selected IO ranges
*/ */

View File

@ -8,52 +8,49 @@
#include <softirq.h> #include <softirq.h>
#include <ptdev.h> #include <ptdev.h>
/* SOFTIRQ_PTDEV list for all CPUs */
static struct list_head softirq_dev_entry_list;
/* passthrough device link */ /* passthrough device link */
struct list_head ptdev_list; struct list_head ptdev_list;
spinlock_t ptdev_lock; spinlock_t ptdev_lock;
/* /*
* entry could both be in ptdev_list and softirq_dev_entry_list. * entry could both be in ptdev_list and vm->softirq_dev_entry_list.
* When release entry, we need make sure entry deleted from both * When release entry, we need make sure entry deleted from both
* lists. We have to require two locks and the lock sequence is: * lists. We have to require two locks and the lock sequence is:
* ptdev_lock * ptdev_lock
* softirq_dev_lock * vm->softirq_dev_lock
*/ */
static spinlock_t softirq_dev_lock;
static void ptdev_enqueue_softirq(struct ptdev_remapping_info *entry) static void ptdev_enqueue_softirq(struct ptdev_remapping_info *entry)
{ {
uint64_t rflags; uint64_t rflags;
/* enqueue request in order, SOFTIRQ_PTDEV will pickup */ /* enqueue request in order, SOFTIRQ_PTDEV will pickup */
spinlock_irqsave_obtain(&softirq_dev_lock, &rflags); spinlock_irqsave_obtain(&entry->vm->softirq_dev_lock, &rflags);
/* avoid adding recursively */ /* avoid adding recursively */
list_del(&entry->softirq_node); list_del(&entry->softirq_node);
/* TODO: assert if entry already in list */ /* TODO: assert if entry already in list */
list_add_tail(&entry->softirq_node, list_add_tail(&entry->softirq_node,
&softirq_dev_entry_list); &entry->vm->softirq_dev_entry_list);
spinlock_irqrestore_release(&softirq_dev_lock, rflags); spinlock_irqrestore_release(&entry->vm->softirq_dev_lock, rflags);
fire_softirq(SOFTIRQ_PTDEV); fire_softirq(SOFTIRQ_PTDEV);
} }
struct ptdev_remapping_info* struct ptdev_remapping_info*
ptdev_dequeue_softirq(void) ptdev_dequeue_softirq(struct vm *vm)
{ {
uint64_t rflags; uint64_t rflags;
struct ptdev_remapping_info *entry = NULL; struct ptdev_remapping_info *entry = NULL;
spinlock_irqsave_obtain(&softirq_dev_lock, &rflags); spinlock_irqsave_obtain(&vm->softirq_dev_lock, &rflags);
if (!list_empty(&softirq_dev_entry_list)) { if (!list_empty(&vm->softirq_dev_entry_list)) {
entry = get_first_item(&softirq_dev_entry_list, entry = get_first_item(&vm->softirq_dev_entry_list,
struct ptdev_remapping_info, softirq_node); struct ptdev_remapping_info, softirq_node);
list_del_init(&entry->softirq_node); list_del_init(&entry->softirq_node);
} }
spinlock_irqrestore_release(&softirq_dev_lock, rflags); spinlock_irqrestore_release(&vm->softirq_dev_lock, rflags);
return entry; return entry;
} }
@ -91,9 +88,9 @@ release_entry(struct ptdev_remapping_info *entry)
* remove entry from softirq list.the ptdev_lock * remove entry from softirq list.the ptdev_lock
* is required before calling release_entry. * is required before calling release_entry.
*/ */
spinlock_irqsave_obtain(&softirq_dev_lock, &rflags); spinlock_irqsave_obtain(&entry->vm->softirq_dev_lock, &rflags);
list_del_init(&entry->softirq_node); list_del_init(&entry->softirq_node);
spinlock_irqrestore_release(&softirq_dev_lock, rflags); spinlock_irqrestore_release(&entry->vm->softirq_dev_lock, rflags);
free(entry); free(entry);
} }
@ -149,9 +146,9 @@ ptdev_deactivate_entry(struct ptdev_remapping_info *entry)
entry->allocated_pirq = IRQ_INVALID; entry->allocated_pirq = IRQ_INVALID;
/* remove from softirq list if added */ /* remove from softirq list if added */
spinlock_irqsave_obtain(&softirq_dev_lock, &rflags); spinlock_irqsave_obtain(&entry->vm->softirq_dev_lock, &rflags);
list_del_init(&entry->softirq_node); list_del_init(&entry->softirq_node);
spinlock_irqrestore_release(&softirq_dev_lock, rflags); spinlock_irqrestore_release(&entry->vm->softirq_dev_lock, rflags);
} }
void ptdev_init(void) void ptdev_init(void)
@ -161,8 +158,6 @@ void ptdev_init(void)
INIT_LIST_HEAD(&ptdev_list); INIT_LIST_HEAD(&ptdev_list);
spinlock_init(&ptdev_lock); spinlock_init(&ptdev_lock);
INIT_LIST_HEAD(&softirq_dev_entry_list);
spinlock_init(&softirq_dev_lock);
register_softirq(SOFTIRQ_PTDEV, ptdev_softirq); register_softirq(SOFTIRQ_PTDEV, ptdev_softirq);
} }

View File

@ -167,6 +167,9 @@ struct vm {
struct vpci vpci; struct vpci vpci;
uint8_t vrtc_offset; uint8_t vrtc_offset;
#endif #endif
spinlock_t softirq_dev_lock;
struct list_head softirq_dev_entry_list;
}; };
#ifdef CONFIG_PARTITION_MODE #ifdef CONFIG_PARTITION_MODE

View File

@ -66,11 +66,11 @@ struct ptdev_remapping_info {
extern struct list_head ptdev_list; extern struct list_head ptdev_list;
extern spinlock_t ptdev_lock; extern spinlock_t ptdev_lock;
void ptdev_softirq(__unused uint16_t cpu_id); void ptdev_softirq(uint16_t pcpu_id);
void ptdev_init(void); void ptdev_init(void);
void ptdev_release_all_entries(struct vm *vm); void ptdev_release_all_entries(struct vm *vm);
struct ptdev_remapping_info *ptdev_dequeue_softirq(void); struct ptdev_remapping_info *ptdev_dequeue_softirq(struct vm *vm);
struct ptdev_remapping_info *alloc_entry(struct vm *vm, struct ptdev_remapping_info *alloc_entry(struct vm *vm,
uint32_t intr_type); uint32_t intr_type);
void release_entry(struct ptdev_remapping_info *entry); void release_entry(struct ptdev_remapping_info *entry);