HV: move common stuff from assign.c

Move common stuff, like ptdev entry and softirq, to new ptdev.c

Signed-off-by: Edwin Zhai <edwin.zhai@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Edwin Zhai 2018-06-14 18:06:26 +08:00 committed by Jack Ren
parent 46f64b55b4
commit 8202ba0a70
5 changed files with 268 additions and 229 deletions

View File

@ -138,6 +138,7 @@ C_SRCS += common/trusty_hypercall.c
C_SRCS += common/schedule.c
C_SRCS += common/vm_load.c
C_SRCS += common/io_request.c
C_SRCS += common/ptdev.c
ifdef STACK_PROTECTOR
C_SRCS += common/stack_protector.c

View File

@ -6,28 +6,6 @@
#include <hypervisor.h>
#define ACTIVE_FLAG 0x1 /* any non zero should be okay */
/* File-scope ptdev state; this commit moves it (non-static) to common/ptdev.c. */
/* SOFTIRQ_DEV_ASSIGN list for all CPUs */
static struct list_head softirq_dev_entry_list;

/* passthrough device link */
static struct list_head ptdev_list;
static spinlock_t ptdev_lock;

/* invalid_entry for error return */
static struct ptdev_remapping_info invalid_entry = {
	.type = PTDEV_INTR_INV,
};

/*
 * entry could both be in ptdev_list and softirq_dev_entry_list.
 * When release entry, we need make sure entry deleted from both
 * lists. We have to require two locks and the lock sequence is:
 *   ptdev_lock
 *   softirq_dev_lock
 */
static spinlock_t softirq_dev_lock;
static inline uint32_t
entry_id_from_msix(uint16_t bdf, int8_t index)
{
@ -166,105 +144,6 @@ lookup_entry_by_vintx(struct vm *vm, uint8_t vpin,
return entry;
}
/* Queue @entry for SOFTIRQ_DEV_ASSIGN handling; moved to common/ptdev.c by this commit. */
static void ptdev_enqueue_softirq(struct ptdev_remapping_info *entry)
{
	spinlock_rflags;
	/* enqueue request in order, SOFTIRQ_DEV_ASSIGN will pickup */
	spinlock_irqsave_obtain(&softirq_dev_lock);

	/* avoid adding recursively */
	list_del(&entry->softirq_node);
	/* TODO: assert if entry already in list */
	list_add_tail(&entry->softirq_node,
			&softirq_dev_entry_list);

	spinlock_irqrestore_release(&softirq_dev_lock);
	raise_softirq(SOFTIRQ_DEV_ASSIGN);
}
/* Pop the first queued entry, or NULL when the list is empty; moved to common/ptdev.c. */
static struct ptdev_remapping_info*
ptdev_dequeue_softirq(void)
{
	struct ptdev_remapping_info *entry = NULL;

	spinlock_rflags;
	spinlock_irqsave_obtain(&softirq_dev_lock);

	if (!list_empty(&softirq_dev_entry_list)) {
		entry = get_first_item(&softirq_dev_entry_list,
			struct ptdev_remapping_info, softirq_node);
		list_del_init(&entry->softirq_node);
	}

	spinlock_irqrestore_release(&softirq_dev_lock);
	return entry;
}
/* require ptdev_lock protect */
/* Allocate a zeroed entry for @vm and link it on ptdev_list; moved to common/ptdev.c. */
static struct ptdev_remapping_info *
alloc_entry(struct vm *vm, enum ptdev_intr_type type)
{
	struct ptdev_remapping_info *entry;

	/* allocate */
	entry = calloc(1, sizeof(*entry));
	ASSERT(entry, "alloc memory failed");
	entry->type = type;
	entry->vm = vm;

	INIT_LIST_HEAD(&entry->softirq_node);
	INIT_LIST_HEAD(&entry->entry_node);

	/* created inactive; ptdev_activate_entry() sets ACTIVE_FLAG */
	atomic_clear_int(&entry->active, ACTIVE_FLAG);
	list_add(&entry->entry_node, &ptdev_list);

	return entry;
}
/* require ptdev_lock protect */
/* Unlink @entry from both lists and free it; moved to common/ptdev.c. */
static void
release_entry(struct ptdev_remapping_info *entry)
{
	spinlock_rflags;

	/* remove entry from ptdev_list */
	list_del_init(&entry->entry_node);

	/*
	 * Remove entry from the softirq list; the ptdev_lock
	 * is required before calling release_entry.
	 */
	spinlock_irqsave_obtain(&softirq_dev_lock);
	list_del_init(&entry->softirq_node);
	spinlock_irqrestore_release(&softirq_dev_lock);

	free(entry);
}
/* require ptdev_lock protect */
/* Free every entry owned by @vm; moved to common/ptdev.c. */
static void
release_all_entries(struct vm *vm)
{
	struct ptdev_remapping_info *entry;
	struct list_head *pos, *tmp;

	/* safe iteration: release_entry() unlinks the current node */
	list_for_each_safe(pos, tmp, &ptdev_list) {
		entry = list_entry(pos, struct ptdev_remapping_info,
				entry_node);
		if (entry->vm == vm)
			release_entry(entry);
	}
}
/* interrupt context */
static int ptdev_interrupt_handler(__unused int irq, void *data)
{
	/* runs in interrupt context: defer the work to softirq; moved to common/ptdev.c */
	struct ptdev_remapping_info *entry =
		(struct ptdev_remapping_info *) data;

	ptdev_enqueue_softirq(entry);
	return 0;
}
static void
ptdev_update_irq_handler(struct vm *vm, struct ptdev_remapping_info *entry)
{
@ -306,40 +185,6 @@ ptdev_update_irq_handler(struct vm *vm, struct ptdev_remapping_info *entry)
}
}
/* active intr with irq registering */
/*
 * Moved to common/ptdev.c by this commit.
 * NOTE(review): phys_irq is uint32_t here but the moved copy takes int —
 * confirm the signedness change is intended at all call sites.
 */
static struct ptdev_remapping_info *
ptdev_activate_entry(struct ptdev_remapping_info *entry, uint32_t phys_irq,
		bool lowpri)
{
	struct dev_handler_node *node;

	/* register and allocate host vector/irq */
	node = normal_register_handler(phys_irq, ptdev_interrupt_handler,
		(void *)entry, true, lowpri, "dev assign");
	ASSERT(node != NULL, "dev register failed");
	entry->node = node;

	atomic_set_int(&entry->active, ACTIVE_FLAG);
	return entry;
}
/* Unregister @entry's handler and clear ACTIVE_FLAG; moved to common/ptdev.c. */
static void
ptdev_deactivate_entry(struct ptdev_remapping_info *entry)
{
	spinlock_rflags;

	atomic_clear_int(&entry->active, ACTIVE_FLAG);

	unregister_handler_common(entry->node);
	entry->node = NULL;

	/* remove from softirq list if added */
	spinlock_irqsave_obtain(&softirq_dev_lock);
	list_del_init(&entry->softirq_node);
	spinlock_irqrestore_release(&softirq_dev_lock);
}
static bool ptdev_hv_owned_intx(struct vm *vm, struct ptdev_intx_info *info)
{
/* vm0 pin 4 (uart) is owned by hypervisor under debug version */
@ -1068,25 +913,6 @@ void ptdev_remove_msix_remapping(struct vm *vm, uint16_t virt_bdf,
remove_msix_remapping(vm, virt_bdf, i);
}
/* One-time init of ptdev lists/locks on the BSP only; moved to common/ptdev.c. */
void ptdev_init(void)
{
	if (get_cpu_id() > 0)
		return;

	INIT_LIST_HEAD(&ptdev_list);
	spinlock_init(&ptdev_lock);
	INIT_LIST_HEAD(&softirq_dev_entry_list);
	spinlock_init(&softirq_dev_lock);
}
/* VM-teardown hook: free all entries of @vm under ptdev_lock; moved to common/ptdev.c. */
void ptdev_release_all_entries(struct vm *vm)
{
	/* VM already down */
	spinlock_obtain(&ptdev_lock);
	release_all_entries(vm);
	spinlock_release(&ptdev_lock);
}
static void get_entry_info(struct ptdev_remapping_info *entry, char *type,
uint32_t *irq, uint32_t *vector, uint64_t *dest, bool *lvl_tm,
int *pin, int *vpin, int *bdf, int *vbdf)

182
hypervisor/common/ptdev.c Normal file
View File

@ -0,0 +1,182 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <hypervisor.h>
#include <ptdev.h>
/*
 * Shared ptdev state, formerly static in assign.c; now exported
 * (declared extern in ptdev.h) so arch code can reach it.
 */
/* SOFTIRQ_DEV_ASSIGN list for all CPUs */
struct list_head softirq_dev_entry_list;

/* passthrough device link */
struct list_head ptdev_list;
spinlock_t ptdev_lock;

/* invalid_entry for error return */
struct ptdev_remapping_info invalid_entry = {
	.type = PTDEV_INTR_INV,
};

/*
 * entry could both be in ptdev_list and softirq_dev_entry_list.
 * When release entry, we need make sure entry deleted from both
 * lists. We have to require two locks and the lock sequence is:
 *   ptdev_lock
 *   softirq_dev_lock
 */
spinlock_t softirq_dev_lock;
/*
 * Queue @entry on softirq_dev_entry_list and raise SOFTIRQ_DEV_ASSIGN
 * so the softirq handler injects the interrupt into the guest.
 * Called from interrupt context (ptdev_interrupt_handler).
 */
static void ptdev_enqueue_softirq(struct ptdev_remapping_info *entry)
{
	spinlock_rflags;
	/* enqueue request in order, SOFTIRQ_DEV_ASSIGN will pickup */
	spinlock_irqsave_obtain(&softirq_dev_lock);

	/* unlink first so a re-enqueue of the same entry cannot corrupt the list */
	list_del(&entry->softirq_node);
	/* TODO: assert if entry already in list */
	list_add_tail(&entry->softirq_node,
			&softirq_dev_entry_list);

	spinlock_irqrestore_release(&softirq_dev_lock);
	raise_softirq(SOFTIRQ_DEV_ASSIGN);
}
/*
 * Pop the oldest entry from softirq_dev_entry_list, or return NULL
 * when the list is empty.  Runs under softirq_dev_lock with
 * interrupts disabled; consumed by the SOFTIRQ_DEV_ASSIGN handler.
 */
struct ptdev_remapping_info*
ptdev_dequeue_softirq(void)
{
	struct ptdev_remapping_info *entry = NULL;

	spinlock_rflags;
	spinlock_irqsave_obtain(&softirq_dev_lock);

	if (!list_empty(&softirq_dev_entry_list)) {
		entry = get_first_item(&softirq_dev_entry_list,
			struct ptdev_remapping_info, softirq_node);
		/* re-init the node so a later list_del is harmless */
		list_del_init(&entry->softirq_node);
	}

	spinlock_irqrestore_release(&softirq_dev_lock);
	return entry;
}
/* require ptdev_lock protect */
/*
 * Allocate a zero-initialized remapping entry for @vm of the given
 * @type, mark it inactive and link it on ptdev_list.
 * NOTE(review): calloc() failure is only caught by ASSERT; if ASSERT
 * compiles out in release builds the dereference below would fault —
 * confirm the project's OOM policy.
 */
struct ptdev_remapping_info *
alloc_entry(struct vm *vm, enum ptdev_intr_type type)
{
	struct ptdev_remapping_info *entry;

	/* allocate */
	entry = calloc(1, sizeof(*entry));
	ASSERT(entry, "alloc memory failed");
	entry->type = type;
	entry->vm = vm;

	INIT_LIST_HEAD(&entry->softirq_node);
	INIT_LIST_HEAD(&entry->entry_node);

	/* created inactive; ptdev_activate_entry() sets ACTIVE_FLAG */
	atomic_clear_int(&entry->active, ACTIVE_FLAG);
	list_add(&entry->entry_node, &ptdev_list);

	return entry;
}
/* require ptdev_lock protect */
/*
 * Unlink @entry from ptdev_list and from the softirq list, then free it.
 * Lock order: caller holds ptdev_lock; softirq_dev_lock is taken inside
 * (matching the documented ptdev_lock -> softirq_dev_lock ordering).
 */
void
release_entry(struct ptdev_remapping_info *entry)
{
	spinlock_rflags;

	/* remove entry from ptdev_list */
	list_del_init(&entry->entry_node);

	/*
	 * Remove entry from the softirq list; the ptdev_lock
	 * is required before calling release_entry.
	 */
	spinlock_irqsave_obtain(&softirq_dev_lock);
	list_del_init(&entry->softirq_node);
	spinlock_irqrestore_release(&softirq_dev_lock);

	free(entry);
}
/* require ptdev_lock protect */
/*
 * Walk ptdev_list and release every remapping entry owned by @vm.
 * Uses the safe iterator because release_entry() unlinks the node
 * currently being visited.
 */
static void
release_all_entries(struct vm *vm)
{
	struct list_head *cur, *next;

	list_for_each_safe(cur, next, &ptdev_list) {
		struct ptdev_remapping_info *info;

		info = list_entry(cur, struct ptdev_remapping_info,
				entry_node);
		if (info->vm != vm)
			continue;

		release_entry(info);
	}
}
/* interrupt context */
/*
 * Physical IRQ handler for an assigned device: defer all work to the
 * SOFTIRQ_DEV_ASSIGN softirq by queueing the entry passed as @data.
 * Always reports the interrupt as handled (returns 0).
 */
static int ptdev_interrupt_handler(__unused int irq, void *data)
{
	ptdev_enqueue_softirq((struct ptdev_remapping_info *)data);
	return 0;
}
/* active intr with irq registering */
/*
 * Register an irq handler for @phys_irq on behalf of @entry and mark
 * the entry active.  Returns @entry.
 * NOTE(review): phys_irq was uint32_t in the pre-move assign.c copy and
 * is int here/in ptdev.h — confirm the narrowing is intended.
 * NOTE(review): normal_register_handler() failure only trips ASSERT;
 * entry->node stays NULL in a release build — confirm failure policy.
 */
struct ptdev_remapping_info *
ptdev_activate_entry(struct ptdev_remapping_info *entry, int phys_irq,
		bool lowpri)
{
	struct dev_handler_node *node;

	/* register and allocate host vector/irq */
	node = normal_register_handler(phys_irq, ptdev_interrupt_handler,
		(void *)entry, true, lowpri, "dev assign");
	ASSERT(node != NULL, "dev register failed");
	entry->node = node;

	atomic_set_int(&entry->active, ACTIVE_FLAG);
	return entry;
}
/*
 * Deactivate @entry: clear ACTIVE_FLAG, unregister its host irq
 * handler, and drop it from the softirq list if it was queued there.
 * Counterpart of ptdev_activate_entry().
 */
void
ptdev_deactivate_entry(struct ptdev_remapping_info *entry)
{
	spinlock_rflags;

	atomic_clear_int(&entry->active, ACTIVE_FLAG);

	unregister_handler_common(entry->node);
	entry->node = NULL;

	/* remove from softirq list if added */
	spinlock_irqsave_obtain(&softirq_dev_lock);
	list_del_init(&entry->softirq_node);
	spinlock_irqrestore_release(&softirq_dev_lock);
}
/*
 * One-time initialisation of the global ptdev lists and their locks.
 * Only the bootstrap CPU (id 0) performs it; all other CPUs return
 * immediately.
 */
void ptdev_init(void)
{
	if (get_cpu_id() > 0)
		return;

	/* the two lists and their protecting locks */
	INIT_LIST_HEAD(&ptdev_list);
	INIT_LIST_HEAD(&softirq_dev_entry_list);
	spinlock_init(&ptdev_lock);
	spinlock_init(&softirq_dev_lock);
}
/*
 * VM-teardown hook: free every remapping entry owned by @vm.
 * Takes ptdev_lock, as release_all_entries()/release_entry() require.
 */
void ptdev_release_all_entries(struct vm *vm)
{
	/* VM already down */
	spinlock_obtain(&ptdev_lock);
	release_all_entries(vm);
	spinlock_release(&ptdev_lock);
}

View File

@ -7,66 +7,13 @@
#ifndef ASSIGN_H
#define ASSIGN_H
/* Interrupt-delivery kind of a passthrough entry; moved to ptdev.h by this commit. */
enum ptdev_intr_type {
	PTDEV_INTR_MSI,
	PTDEV_INTR_INTX,
	PTDEV_INTR_INV,
};

/* Which virtual interrupt controller owns the guest pin; moved to ptdev.h. */
enum ptdev_vpin_source {
	PTDEV_VPIN_IOAPIC,
	PTDEV_VPIN_PIC,
};
/* entry per guest virt vector */
/* Virtual/physical MSI(-X) programming data; moved to ptdev.h by this commit. */
struct ptdev_msi_info {
	uint32_t vmsi_addr; /* virt msi_addr */
	uint32_t vmsi_data; /* virt msi_data */
	uint16_t vmsi_ctl; /* virt msi_ctl */
	uint32_t pmsi_addr; /* phys msi_addr */
	uint32_t pmsi_data; /* phys msi_data */
	int msix;	/* 0-MSI, 1-MSIX */
	int msix_entry_index; /* MSI: 0, MSIX: index of vector table*/
	int virt_vector;
	int phys_vector;
};
/* entry per guest vioapic pin */
/* Virtual-to-physical INTx pin mapping; moved to ptdev.h by this commit. */
struct ptdev_intx_info {
	enum ptdev_vpin_source vpin_src;
	uint8_t virt_pin;
	uint8_t phys_pin;
};
/* entry per each allocated irq/vector
 * it represents a pass-thru device's remapping data entry which collecting
 * information related with its vm and msi/intx mapping & interaction nodes
 * with interrupt handler and softirq.
 * Moved to ptdev.h by this commit.
 */
struct ptdev_remapping_info {
	struct vm *vm;
	uint16_t virt_bdf;	/* PCI bus:slot.func*/
	uint16_t phys_bdf;	/* PCI bus:slot.func*/
	uint32_t active;	/* 1=active, 0=inactive and to free*/
	enum ptdev_intr_type type;
	struct dev_handler_node *node;
	struct list_head softirq_node;
	struct list_head entry_node;
	union {
		struct ptdev_msi_info msi;
		struct ptdev_intx_info intx;
	} ptdev_intr_info;
};
#include <ptdev.h>
/* INTx/MSI remapping API implemented in arch assign.c */
void ptdev_intx_ack(struct vm *vm, int virt_pin,
		enum ptdev_vpin_source vpin_src);
int ptdev_msix_remap(struct vm *vm, uint16_t virt_bdf,
		struct ptdev_msi_info *info);
int ptdev_intx_pin_remap(struct vm *vm, struct ptdev_intx_info *info);
/* common entry points (their definitions move to common/ptdev.c in this commit) */
void ptdev_softirq(int cpu);
void ptdev_init(void);
void ptdev_release_all_entries(struct vm *vm);
int ptdev_add_intx_remapping(struct vm *vm, uint16_t virt_bdf,
	uint16_t phys_bdf, uint8_t virt_pin, uint8_t phys_pin, bool pic_pin);
void ptdev_remove_intx_remapping(struct vm *vm, uint8_t virt_pin, bool pic_pin);
@ -74,6 +21,5 @@ int ptdev_add_msix_remapping(struct vm *vm, uint16_t virt_bdf,
uint16_t phys_bdf, int vector_count);
void ptdev_remove_msix_remapping(struct vm *vm, uint16_t virt_bdf,
int vector_count);
int get_ptdev_info(char *str, int str_max);
#endif /* ASSIGN_H */

View File

@ -0,0 +1,84 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef PTDEV_H
#define PTDEV_H
/* Value stored in ptdev_remapping_info.active by atomic_set_int/clear_int */
#define ACTIVE_FLAG 0x1 /* any non zero should be okay */

/* How a passthrough entry's interrupt is delivered to the guest */
enum ptdev_intr_type {
	PTDEV_INTR_MSI,
	PTDEV_INTR_INTX,
	PTDEV_INTR_INV,	/* invalid/unset; used by invalid_entry */
};

/* Which virtual interrupt controller owns the guest pin */
enum ptdev_vpin_source {
	PTDEV_VPIN_IOAPIC,
	PTDEV_VPIN_PIC,
};
/* entry per guest virt vector */
/* Guest-programmed and host-resolved MSI(-X) address/data pair */
struct ptdev_msi_info {
	uint32_t vmsi_addr; /* virt msi_addr */
	uint32_t vmsi_data; /* virt msi_data */
	uint16_t vmsi_ctl; /* virt msi_ctl */
	uint32_t pmsi_addr; /* phys msi_addr */
	uint32_t pmsi_data; /* phys msi_data */
	int msix;	/* 0-MSI, 1-MSIX */
	int msix_entry_index; /* MSI: 0, MSIX: index of vector table*/
	int virt_vector;	/* vector seen by the guest */
	int phys_vector;	/* vector allocated on the host */
};
/* entry per guest vioapic pin */
/* Maps a guest pin (vIOAPIC or vPIC, per vpin_src) to a physical pin */
struct ptdev_intx_info {
	enum ptdev_vpin_source vpin_src;
	uint8_t virt_pin;
	uint8_t phys_pin;
};
/* entry per each allocated irq/vector
 * it represents a pass-thru device's remapping data entry which collecting
 * information related with its vm and msi/intx mapping & interaction nodes
 * with interrupt handler and softirq.
 */
struct ptdev_remapping_info {
	struct vm *vm;	/* owning guest */
	uint16_t virt_bdf;	/* PCI bus:slot.func*/
	uint16_t phys_bdf;	/* PCI bus:slot.func*/
	uint32_t active;	/* 1=active, 0=inactive and to free*/
	enum ptdev_intr_type type;	/* selects valid ptdev_intr_info member */
	struct dev_handler_node *node;	/* host irq registration (ptdev_activate_entry) */
	struct list_head softirq_node;	/* link on softirq_dev_entry_list */
	struct list_head entry_node;	/* link on ptdev_list */
	union {
		struct ptdev_msi_info msi;
		struct ptdev_intx_info intx;
	} ptdev_intr_info;
};
/* Shared state defined in common/ptdev.c */
extern struct list_head softirq_dev_entry_list;	/* SOFTIRQ_DEV_ASSIGN queue */
extern struct list_head ptdev_list;	/* all allocated remapping entries */
extern spinlock_t ptdev_lock;	/* protects ptdev_list; take before softirq_dev_lock */
extern struct ptdev_remapping_info invalid_entry;	/* error-return sentinel */
extern spinlock_t softirq_dev_lock;	/* protects softirq_dev_entry_list */

/* public entry points */
void ptdev_softirq(int cpu);
void ptdev_init(void);
void ptdev_release_all_entries(struct vm *vm);
int get_ptdev_info(char *str, int str_max);

/* common helpers shared with arch assign.c; most require ptdev_lock */
struct ptdev_remapping_info *ptdev_dequeue_softirq(void);
struct ptdev_remapping_info *alloc_entry(struct vm *vm,
		enum ptdev_intr_type type);
void release_entry(struct ptdev_remapping_info *entry);
struct ptdev_remapping_info *ptdev_activate_entry(
		struct ptdev_remapping_info *entry,
		int phys_irq, bool lowpri);
void ptdev_deactivate_entry(struct ptdev_remapping_info *entry);
#endif /* PTDEV_H */