acrn-hypervisor/hypervisor/common/ptdev.c
Yan, Like a8cd6925fc hv: pirq: clean up irq handlers
There were several similar irq handlers with confusing function names, and it
was unfriendly to have to call update_irq_handler() to install the proper
handler after request_irq().

With this commit, a single generic irq handler is used, in which no lock needs
to be acquired, because the design guarantees there is no concurrent irq
handling and irq handler request/free.
A flags field is added to the irq_desc struct to select the proper processing
flow for an irq (see the sketch after this list). The irq flags are defined as
follows:
 IRQF_NONE       (0U)
 IRQF_LEVEL      (1U << 1U)       /* 1: level trigger; 0: edge trigger */
 IRQF_PT         (1U << 2U)       /* 1: for passthrough dev */
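
As a sketch only, the generic handler can use these flags roughly as below.
The helpers ioapic_gsi_mask_irq()/ioapic_gsi_unmask_irq() and the
desc->action/desc->priv_data fields are illustrative assumptions, not
necessarily the actual ACRN symbols:

 static void generic_irq_handler(struct irq_desc *desc)
 {
 	/* mask a level-triggered irq while it is being handled,
 	 * so it cannot re-fire before the source is quiesced */
 	if ((desc->flags & IRQF_LEVEL) != 0U) {
 		ioapic_gsi_mask_irq(desc->irq);		/* assumed helper */
 	}

 	if (desc->action != NULL) {
 		(void)desc->action(desc->irq, desc->priv_data);
 	}

 	/*
 	 * For a passthrough device (IRQF_PT) the pin stays masked until
 	 * the guest EOIs its virtual interrupt; otherwise unmask now.
 	 */
 	if ((desc->flags & (IRQF_LEVEL | IRQF_PT)) == IRQF_LEVEL) {
 		ioapic_gsi_unmask_irq(desc->irq);	/* assumed helper */
 	}
 }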

Because we have only one irq handler, update_irq_handler() is replaced by
set_irq_trigger_mode(), which sets the trigger-mode flag of a certain irq.
Accordingly, the code that called update_irq_handler() is updated to match.
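
For reference, a minimal sketch of what set_irq_trigger_mode() might look
like; the irq_desc_array lookup and the exact prototype are assumptions based
on the description above:

 void set_irq_trigger_mode(uint32_t irq, bool is_level_triggered)
 {
 	struct irq_desc *desc = &irq_desc_array[irq];	/* assumed lookup */

 	if (is_level_triggered) {
 		desc->flags |= IRQF_LEVEL;
 	} else {
 		desc->flags &= ~IRQF_LEVEL;
 	}
 }

Callers such as the vIOAPIC emulation, which previously swapped handlers via
update_irq_handler() when the guest reprogrammed a pin's trigger mode, would
now just call set_irq_trigger_mode() with the new mode.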

Signed-off-by: Yan, Like <like.yan@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
2018-08-17 14:25:52 +08:00

/*
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <hypervisor.h>
#include <softirq.h>
#include <ptdev.h>
/* SOFTIRQ_PTDEV list for all CPUs */
static struct list_head softirq_dev_entry_list;
/* passthrough device link */
struct list_head ptdev_list;
spinlock_t ptdev_lock;
/* invalid_entry for error return */
struct ptdev_remapping_info invalid_entry = {
	.type = PTDEV_INTR_INV,
};
/*
 * An entry can be on both ptdev_list and softirq_dev_entry_list at the
 * same time. When releasing an entry, we must make sure it is deleted
 * from both lists, so two locks have to be acquired, in this order:
 *   ptdev_lock
 *   softirq_dev_lock
 * release_entry() below is where this ordering is exercised.
 */
static spinlock_t softirq_dev_lock;
static void ptdev_enqueue_softirq(struct ptdev_remapping_info *entry)
{
	uint64_t rflags;

	/* enqueue request in order; SOFTIRQ_PTDEV will pick it up */
	spinlock_irqsave_obtain(&softirq_dev_lock, &rflags);

	/* avoid adding recursively */
	list_del(&entry->softirq_node);
	/* TODO: assert if entry already in list */
	list_add_tail(&entry->softirq_node, &softirq_dev_entry_list);

	spinlock_irqrestore_release(&softirq_dev_lock, rflags);
	fire_softirq(SOFTIRQ_PTDEV);
}
struct ptdev_remapping_info *
ptdev_dequeue_softirq(void)
{
	uint64_t rflags;
	struct ptdev_remapping_info *entry = NULL;

	spinlock_irqsave_obtain(&softirq_dev_lock, &rflags);

	if (!list_empty(&softirq_dev_entry_list)) {
		entry = get_first_item(&softirq_dev_entry_list,
				struct ptdev_remapping_info, softirq_node);
		list_del_init(&entry->softirq_node);
	}

	spinlock_irqrestore_release(&softirq_dev_lock, rflags);
	return entry;
}
/* requires the caller to hold ptdev_lock */
struct ptdev_remapping_info *
alloc_entry(struct vm *vm, enum ptdev_intr_type type)
{
	struct ptdev_remapping_info *entry;

	/* allocate and initialize */
	entry = calloc(1U, sizeof(*entry));
	ASSERT(entry != NULL, "alloc memory failed");
	entry->type = type;
	entry->vm = vm;

	INIT_LIST_HEAD(&entry->softirq_node);
	INIT_LIST_HEAD(&entry->entry_node);

	atomic_clear32(&entry->active, ACTIVE_FLAG);
	list_add(&entry->entry_node, &ptdev_list);

	return entry;
}
/* requires the caller to hold ptdev_lock */
void
release_entry(struct ptdev_remapping_info *entry)
{
	uint64_t rflags;

	/* remove entry from ptdev_list */
	list_del_init(&entry->entry_node);

	/*
	 * Remove entry from the softirq list. ptdev_lock must be
	 * held before calling release_entry (see lock order above).
	 */
	spinlock_irqsave_obtain(&softirq_dev_lock, &rflags);
	list_del_init(&entry->softirq_node);
	spinlock_irqrestore_release(&softirq_dev_lock, rflags);

	free(entry);
}
/* requires the caller to hold ptdev_lock */
static void
release_all_entries(struct vm *vm)
{
	struct ptdev_remapping_info *entry;
	struct list_head *pos, *tmp;

	list_for_each_safe(pos, tmp, &ptdev_list) {
		entry = list_entry(pos, struct ptdev_remapping_info,
				entry_node);
		if (entry->vm == vm) {
			release_entry(entry);
		}
	}
}
/* interrupt context */
static int ptdev_interrupt_handler(__unused uint32_t irq, void *data)
{
	struct ptdev_remapping_info *entry =
			(struct ptdev_remapping_info *)data;

	ptdev_enqueue_softirq(entry);
	return 0;
}
/* activate intr with irq registering */
void
ptdev_activate_entry(struct ptdev_remapping_info *entry, uint32_t phys_irq)
{
	int32_t retval;

	/* register the handler and allocate the host vector/irq */
	retval = request_irq(phys_irq, ptdev_interrupt_handler,
			(void *)entry, IRQF_PT);
	ASSERT(retval >= 0, "dev register failed");
	entry->allocated_pirq = (uint32_t)retval;

	atomic_set32(&entry->active, ACTIVE_FLAG);
}
void
ptdev_deactivate_entry(struct ptdev_remapping_info *entry)
{
	uint64_t rflags;

	atomic_clear32(&entry->active, ACTIVE_FLAG);

	free_irq(entry->allocated_pirq);
	entry->allocated_pirq = IRQ_INVALID;

	/* remove from softirq list if added */
	spinlock_irqsave_obtain(&softirq_dev_lock, &rflags);
	list_del_init(&entry->softirq_node);
	spinlock_irqrestore_release(&softirq_dev_lock, rflags);
}
void ptdev_init(void)
{
	if (get_cpu_id() > 0) {
		return;
	}

	INIT_LIST_HEAD(&ptdev_list);
	spinlock_init(&ptdev_lock);
	INIT_LIST_HEAD(&softirq_dev_entry_list);
	spinlock_init(&softirq_dev_lock);
	register_softirq(SOFTIRQ_PTDEV, ptdev_softirq);
}
void ptdev_release_all_entries(struct vm *vm)
{
	/* VM already down */
	spinlock_obtain(&ptdev_lock);
	release_all_entries(vm);
	spinlock_release(&ptdev_lock);
}