HV: remove vdev ops for sharing mode

Remove the vdev ops for sharing mode; call the corresponding functions directly instead
of dispatching through the ops callbacks.

Remove alloc_pci_vdev() and merge its code into init_vdev_for_pdev() to simplify the code.

Remove @pre annotations for local variables.

Change the return type of the following functions from int32_t to void to comply with
MISRA C, and add ASSERTs in the functions (where necessary) to verify their assumptions
in debug builds:
 vmsi_init
 vmsix_init
 vmsi_deinit
 vmsix_deinit

Add @pre annotations for vmsix_init_helper and make it a void function; use ASSERT to
verify the assumption in debug builds.

Add ASSERT in get_sos_vm
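
For illustration, a minimal self-contained sketch of the void-plus-ASSERT pattern
described above; the ASSERT macro and function name here are stand-ins for this sketch
only, not ACRN's actual implementation, and the function body is elided:

#include <stdio.h>
#include <stdlib.h>

/* Debug-only assertion stand-in: active only when HV_DEBUG is defined. */
#ifdef HV_DEBUG
#define ASSERT(cond, msg) \
	do { if (!(cond)) { (void)printf("ASSERT failed: %s\n", (msg)); exit(1); } } while (0)
#else
#define ASSERT(cond, msg) do { } while (0)
#endif

struct pci_vdev; /* opaque here; real fields omitted */

/* Was "int32_t foo_init(...)" returning 0 unconditionally; now void, with the
 * assumption checked only in debug builds instead of reported via a return code. */
void foo_init(struct pci_vdev *vdev)
{
	ASSERT(vdev != NULL, "vdev must not be NULL");
	/* ... initialize the virtual capability from the physical device ... */
}

int main(void)
{
	struct pci_vdev *vdev = NULL;
	foo_init(vdev); /* trips the ASSERT in a debug build; a silent no-op check otherwise */
	return 0;
}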

Tracked-On: #2534
Signed-off-by: dongshen <dongsheng.x.zhang@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
dongshen 2019-03-05 17:47:25 -08:00 committed by wenlingz
parent eb4f46987a
commit 19c5342506
6 changed files with 80 additions and 176 deletions

@@ -110,6 +110,8 @@ struct acrn_vm *get_vm_from_vmid(uint16_t vm_id)
/* return a pointer to the virtual machine structure of SOS VM */
struct acrn_vm *get_sos_vm(void)
{
ASSERT(sos_vm_ptr != NULL, "sos_vm_ptr is NULL");
return sos_vm_ptr;
}

@@ -167,22 +167,13 @@ int32_t vmsi_cfgwrite(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, ui
return ret;
}
int32_t vmsi_deinit(const struct pci_vdev *vdev)
void vmsi_deinit(const struct pci_vdev *vdev)
{
if (has_msi_cap(vdev)) {
ptirq_remove_msix_remapping(vdev->vpci->vm, vdev->vbdf.value, 1U);
}
return 0;
}
const struct pci_vdev_ops pci_ops_vdev_msi = {
.init = vmsi_init,
.deinit = vmsi_deinit,
.cfgwrite = vmsi_cfgwrite,
.cfgread = vmsi_cfgread,
};
/* Read a uint32_t from buffer (little endian) */
static uint32_t buf_read32(const uint8_t buf[])
{
@@ -198,7 +189,7 @@ static void buf_write32(uint8_t buf[], uint32_t val)
buf[3] = (uint8_t)((val >> 24U) & 0xFFU);
}
int32_t vmsi_init(struct pci_vdev *vdev)
void vmsi_init(struct pci_vdev *vdev)
{
struct pci_pdev *pdev = vdev->pdev;
uint32_t val;
@@ -216,7 +207,5 @@ int32_t vmsi_init(struct pci_vdev *vdev)
buf_write32(&vdev->cfgdata.data_8[pdev->msi.capoff], val);
}
return 0;
}

@@ -328,74 +328,71 @@ static int32_t vmsix_table_mmio_access_handler(struct io_request *io_req, void *
return ret;
}
static int32_t vmsix_init_helper(struct pci_vdev *vdev)
/**
* @pre vdev != NULL
* @pre vdev->pdev != NULL
* @pre vdev->pdev->msix.table_bar < (PCI_BAR_COUNT - 1U)
*/
static void vmsix_init_helper(struct pci_vdev *vdev)
{
uint32_t i;
uint64_t addr_hi, addr_lo;
struct pci_msix *msix = &vdev->msix;
struct pci_pdev *pdev = vdev->pdev;
struct pci_bar *bar;
int32_t ret;
ASSERT(vdev->pdev->msix.table_bar < (PCI_BAR_COUNT - 1U), "msix->table_bar out of range");
msix->table_bar = pdev->msix.table_bar;
msix->table_offset = pdev->msix.table_offset;
msix->table_count = pdev->msix.table_count;
if (msix->table_bar < (PCI_BAR_COUNT - 1U)) {
/* Mask all table entries */
for (i = 0U; i < msix->table_count; i++) {
msix->tables[i].vector_control = PCIM_MSIX_VCTRL_MASK;
msix->tables[i].addr = 0U;
msix->tables[i].data = 0U;
}
bar = &pdev->bar[msix->table_bar];
if (bar != NULL) {
vdev->msix.mmio_hpa = bar->base;
vdev->msix.mmio_gpa = sos_vm_hpa2gpa(bar->base);
vdev->msix.mmio_size = bar->size;
}
if (msix->mmio_gpa != 0U) {
/*
* PCI Spec: a BAR may also map other usable address space that is not associated
* with MSI-X structures, but it must not share any naturally aligned 4 KB
* address range with one where either MSI-X structure resides.
* The MSI-X Table and MSI-X PBA are permitted to co-reside within a naturally
* aligned 4 KB address range.
*
* If PBA or others reside in the same BAR with MSI-X Table, devicemodel could
* emulate them and maps these memory range at the 4KB boundary. Here, we should
* make sure only intercept the minimum number of 4K pages needed for MSI-X table.
*/
/* The higher boundary of the 4KB aligned address range for MSI-X table */
addr_hi = msix->mmio_gpa + msix->table_offset + (msix->table_count * MSIX_TABLE_ENTRY_SIZE);
addr_hi = round_page_up(addr_hi);
/* The lower boundary of the 4KB aligned address range for MSI-X table */
addr_lo = round_page_down(msix->mmio_gpa + msix->table_offset);
(void)register_mmio_emulation_handler(vdev->vpci->vm, vmsix_table_mmio_access_handler,
addr_lo, addr_hi, vdev);
}
ret = 0;
} else {
pr_err("%s, MSI-X device (%x) invalid table BIR %d", __func__, vdev->pdev->bdf.value, msix->table_bar);
vdev->msix.capoff = 0U;
ret = -EIO;
/* Mask all table entries */
for (i = 0U; i < msix->table_count; i++) {
msix->tables[i].vector_control = PCIM_MSIX_VCTRL_MASK;
msix->tables[i].addr = 0U;
msix->tables[i].data = 0U;
}
return ret;
bar = &pdev->bar[msix->table_bar];
if (bar != NULL) {
vdev->msix.mmio_hpa = bar->base;
vdev->msix.mmio_gpa = sos_vm_hpa2gpa(bar->base);
vdev->msix.mmio_size = bar->size;
}
if (msix->mmio_gpa != 0U) {
/*
* PCI Spec: a BAR may also map other usable address space that is not associated
* with MSI-X structures, but it must not share any naturally aligned 4 KB
* address range with one where either MSI-X structure resides.
* The MSI-X Table and MSI-X PBA are permitted to co-reside within a naturally
* aligned 4 KB address range.
*
* If PBA or others reside in the same BAR with MSI-X Table, devicemodel could
* emulate them and maps these memory range at the 4KB boundary. Here, we should
* make sure only intercept the minimum number of 4K pages needed for MSI-X table.
*/
/* The higher boundary of the 4KB aligned address range for MSI-X table */
addr_hi = msix->mmio_gpa + msix->table_offset + (msix->table_count * MSIX_TABLE_ENTRY_SIZE);
addr_hi = round_page_up(addr_hi);
/* The lower boundary of the 4KB aligned address range for MSI-X table */
addr_lo = round_page_down(msix->mmio_gpa + msix->table_offset);
(void)register_mmio_emulation_handler(vdev->vpci->vm, vmsix_table_mmio_access_handler,
addr_lo, addr_hi, vdev);
}
}
/**
* @pre vdev != NULL
*/
int32_t vmsix_init(struct pci_vdev *vdev)
void vmsix_init(struct pci_vdev *vdev)
{
struct pci_pdev *pdev = vdev->pdev;
int32_t ret = 0;
vdev->msix.capoff = pdev->msix.capoff;
vdev->msix.caplen = pdev->msix.caplen;
@@ -404,10 +401,8 @@ int32_t vmsix_init(struct pci_vdev *vdev)
(void)memcpy_s((void *)&vdev->cfgdata.data_8[pdev->msix.capoff], pdev->msix.caplen,
(void *)&pdev->msix.cap[0U], pdev->msix.caplen);
ret = vmsix_init_helper(vdev);
vmsix_init_helper(vdev);
}
return ret;
}
/**
@@ -415,20 +410,11 @@ int32_t vmsix_init(struct pci_vdev *vdev)
* @pre vdev->vpci != NULL
* @pre vdev->vpci->vm != NULL
*/
int32_t vmsix_deinit(const struct pci_vdev *vdev)
void vmsix_deinit(const struct pci_vdev *vdev)
{
if (has_msix_cap(vdev)) {
if (vdev->msix.table_count != 0U) {
ptirq_remove_msix_remapping(vdev->vpci->vm, vdev->vbdf.value, vdev->msix.table_count);
}
}
return 0;
}
const struct pci_vdev_ops pci_ops_vdev_msix = {
.init = vmsix_init,
.deinit = vmsix_deinit,
.cfgwrite = vmsix_cfgwrite,
.cfgread = vmsix_cfgread,
};
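
As a worked example of the 4 KB range computation in vmsix_init_helper above (the GPA,
offset, and entry count below are hypothetical, chosen only to illustrate the rounding):

#include <stdint.h>

#define PAGE_4K               UINT64_C(0x1000)
#define MSIX_TABLE_ENTRY_SIZE UINT64_C(16)  /* one MSI-X table entry is 16 bytes */

static uint64_t round_4k_down(uint64_t a) { return a & ~(PAGE_4K - 1U); }
static uint64_t round_4k_up(uint64_t a)   { return (a + PAGE_4K - 1U) & ~(PAGE_4K - 1U); }

int main(void)
{
	/* Hypothetical device: table BAR mapped at GPA 0xdf200000, table_offset 0x800, 16 entries */
	uint64_t mmio_gpa = UINT64_C(0xdf200000), table_offset = UINT64_C(0x800), table_count = UINT64_C(16);

	uint64_t addr_lo = round_4k_down(mmio_gpa + table_offset);
	uint64_t addr_hi = round_4k_up(mmio_gpa + table_offset + (table_count * MSIX_TABLE_ENTRY_SIZE));

	/* addr_lo = 0xdf200000, addr_hi = 0xdf201000: exactly one 4 KB page is intercepted
	 * for MSI-X table emulation, even if the BAR is larger and holds other structures. */
	(void)addr_lo;
	(void)addr_hi;
	return 0;
}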

@@ -81,25 +81,20 @@ int32_t vdev_pt_deinit(const struct pci_vdev *vdev);
#else
extern const struct vpci_ops sharing_mode_vpci_ops;
extern const struct pci_vdev_ops pci_ops_vdev_msi;
extern const struct pci_vdev_ops pci_ops_vdev_msix;
int32_t vmsi_init(struct pci_vdev *vdev);
void vmsi_init(struct pci_vdev *vdev);
int32_t vmsi_cfgread(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val);
int32_t vmsi_cfgwrite(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
int32_t vmsi_deinit(const struct pci_vdev *vdev);
int32_t vmsix_init(struct pci_vdev *vdev);
void vmsi_deinit(const struct pci_vdev *vdev);
void vmsix_init(struct pci_vdev *vdev);
int32_t vmsix_cfgread(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val);
int32_t vmsix_cfgwrite(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
int32_t vmsix_deinit(const struct pci_vdev *vdev);
void vmsix_deinit(const struct pci_vdev *vdev);
#endif
uint32_t pci_vdev_read_cfg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes);
void pci_vdev_write_cfg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
void add_vdev_handler(struct pci_vdev *vdev, const struct pci_vdev_ops *ops);
struct pci_vdev *pci_find_vdev_by_vbdf(const struct acrn_vpci *vpci, union pci_bdf vbdf);
struct pci_vdev *pci_find_vdev_by_pbdf(const struct acrn_vpci *vpci, union pci_bdf pbdf);

@@ -33,31 +33,19 @@
#include "pci_priv.h"
/**
* @pre vpci != NULL
*/
static struct pci_vdev *sharing_mode_find_vdev_sos(union pci_bdf pbdf)
{
struct acrn_vm *vm;
struct acrn_vpci *vpci;
struct pci_vdev *vdev = NULL;
vm = get_sos_vm();
if (vm != NULL) {
vpci = &vm->vpci;
vdev = pci_find_vdev_by_pbdf(vpci, pbdf);
}
return vdev;
return pci_find_vdev_by_pbdf(&vm->vpci, pbdf);
}
static void sharing_mode_cfgread(__unused struct acrn_vpci *vpci, union pci_bdf bdf,
uint32_t offset, uint32_t bytes, uint32_t *val)
{
struct pci_vdev *vdev;
bool handled = false;
uint32_t i;
vdev = sharing_mode_find_vdev_sos(bdf);
@@ -65,16 +53,10 @@ static void sharing_mode_cfgread(__unused struct acrn_vpci *vpci, union pci_bdf
if ((vdev == NULL) || ((bytes != 1U) && (bytes != 2U) && (bytes != 4U))) {
*val = ~0U;
} else {
for (i = 0U; (i < vdev->nr_ops) && (!handled); i++) {
if (vdev->ops[i].cfgread != NULL) {
if (vdev->ops[i].cfgread(vdev, offset, bytes, val) == 0) {
handled = true;
}
}
}
/* Not handled by any handlers, passthru to physical device */
if (!handled) {
if ((vmsi_cfgread(vdev, offset, bytes, val) != 0)
&& (vmsix_cfgread(vdev, offset, bytes, val) != 0)
) {
/* Not handled by any handlers, passthru to physical device */
*val = pci_pdev_read_cfg(vdev->pdev->bdf, offset, bytes);
}
}
@@ -84,22 +66,14 @@ static void sharing_mode_cfgwrite(__unused struct acrn_vpci *vpci, union pci_bdf
uint32_t offset, uint32_t bytes, uint32_t val)
{
struct pci_vdev *vdev;
bool handled = false;
uint32_t i;
if ((bytes == 1U) || (bytes == 2U) || (bytes == 4U)) {
vdev = sharing_mode_find_vdev_sos(bdf);
if (vdev != NULL) {
for (i = 0U; (i < vdev->nr_ops) && (!handled); i++) {
if (vdev->ops[i].cfgwrite != NULL) {
if (vdev->ops[i].cfgwrite(vdev, offset, bytes, val) == 0) {
handled = true;
}
}
}
/* Not handled by any handlers, passthru to physical device */
if (!handled) {
if ((vmsi_cfgwrite(vdev, offset, bytes, val) != 0)
&& (vmsix_cfgwrite(vdev, offset, bytes, val) != 0)
) {
/* Not handled by any handlers, passthru to physical device */
pci_pdev_write_cfg(vdev->pdev->bdf, offset, bytes, val);
}
}
@@ -107,15 +81,14 @@ static void sharing_mode_cfgwrite(__unused struct acrn_vpci *vpci, union pci_bdf
}
/**
* @pre pdev != NULL
* @pre vm != NULL
* @pre pdev_ref != NULL
* @pre vm->vpci->pci_vdev_cnt <= CONFIG_MAX_PCI_DEV_NUM
* @pre vdev != NULL
* @pre vm->vpci.pci_vdev_cnt <= CONFIG_MAX_PCI_DEV_NUM
*/
static struct pci_vdev *alloc_pci_vdev(const struct acrn_vm *vm, struct pci_pdev *pdev_ref)
static void init_vdev_for_pdev(struct pci_pdev *pdev, const void *vm)
{
struct pci_vdev *vdev = NULL;
struct acrn_vpci *vpci = (struct acrn_vpci *)&(vm->vpci);
struct acrn_vpci *vpci = &(((struct acrn_vm *)vm)->vpci);
if (vpci->pci_vdev_cnt < CONFIG_MAX_PCI_DEV_NUM) {
vdev = &vpci->pci_vdevs[vpci->pci_vdev_cnt];
@@ -123,35 +96,17 @@ static struct pci_vdev *alloc_pci_vdev(const struct acrn_vm *vm, struct pci_pdev
vdev->vpci = vpci;
/* vbdf equals to pbdf otherwise remapped */
vdev->vbdf = pdev_ref->bdf;
vdev->pdev = pdev_ref;
}
vdev->vbdf = pdev->bdf;
vdev->pdev = pdev;
return vdev;
}
vmsi_init(vdev);
static void init_vdev_for_pdev(struct pci_pdev *pdev, const void *cb_data)
{
const struct acrn_vm *vm = (const struct acrn_vm *)cb_data;
struct pci_vdev *vdev;
vdev = alloc_pci_vdev(vm, pdev);
if (vdev != NULL) {
/* Assign MSI handler for configuration read and write */
add_vdev_handler(vdev, &pci_ops_vdev_msi);
/* Assign MSI-X handler for configuration read and write */
add_vdev_handler(vdev, &pci_ops_vdev_msix);
vmsix_init(vdev);
}
}
/**
* @pre vdev != NULL
*/
static int32_t sharing_mode_vpci_init(const struct acrn_vm *vm)
{
struct pci_vdev *vdev;
uint32_t i, j;
int32_t ret = -ENODEV;
/*
@@ -161,15 +116,6 @@ static int32_t sharing_mode_vpci_init(const struct acrn_vm *vm)
if (is_sos_vm(vm)) {
/* Build up vdev array for sos_vm */
pci_pdev_foreach(init_vdev_for_pdev, vm);
for (i = 0U; i < vm->vpci.pci_vdev_cnt; i++) {
vdev = (struct pci_vdev *)&(vm->vpci.pci_vdevs[i]);
for (j = 0U; j < vdev->nr_ops; j++) {
if (vdev->ops[j].init != NULL) {
(void)vdev->ops[j].init(vdev);
}
}
}
ret = 0;
}
@@ -177,32 +123,22 @@ static int32_t sharing_mode_vpci_init(const struct acrn_vm *vm)
}
/**
* @pre vdev != NULL
* @pre vm != NULL
* @pre vm->vpci.pci_vdev_cnt <= CONFIG_MAX_PCI_DEV_NUM
*/
static void sharing_mode_vpci_deinit(const struct acrn_vm *vm)
{
struct pci_vdev *vdev;
uint32_t i, j;
uint32_t i;
if (is_sos_vm(vm)) {
for (i = 0U; i < vm->vpci.pci_vdev_cnt; i++) {
vdev = (struct pci_vdev *)&(vm->vpci.pci_vdevs[i]);
for (j = 0U; j < vdev->nr_ops; j++) {
if (vdev->ops[j].deinit != NULL) {
(void)vdev->ops[j].deinit(vdev);
}
}
}
}
}
void add_vdev_handler(struct pci_vdev *vdev, const struct pci_vdev_ops *ops)
{
if (vdev->nr_ops >= (MAX_VPCI_DEV_OPS - 1U)) {
pr_err("%s, adding too many handlers", __func__);
} else {
vdev->ops[vdev->nr_ops] = *ops;
vdev->nr_ops++;
vmsi_deinit(vdev);
vmsix_deinit(vdev);
}
}
}
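
Because the removed callback loop and the newly added direct calls are interleaved in the
hunk above, here is a sketch of how the resulting sharing_mode_vpci_deinit() reads after
this change (reconstructed from the hunk, not quoted verbatim from the tree):

static void sharing_mode_vpci_deinit(const struct acrn_vm *vm)
{
	struct pci_vdev *vdev;
	uint32_t i;

	if (is_sos_vm(vm)) {
		for (i = 0U; i < vm->vpci.pci_vdev_cnt; i++) {
			vdev = (struct pci_vdev *)&(vm->vpci.pci_vdevs[i]);
			/* direct calls replace the per-vdev ops[].deinit callbacks */
			vmsi_deinit(vdev);
			vmsix_deinit(vdev);
		}
	}
}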

@@ -77,11 +77,7 @@ union pci_cfgdata {
};
struct pci_vdev {
#ifndef CONFIG_PARTITION_MODE
#define MAX_VPCI_DEV_OPS 4U
struct pci_vdev_ops ops[MAX_VPCI_DEV_OPS];
uint32_t nr_ops;
#else
#ifdef CONFIG_PARTITION_MODE
const struct pci_vdev_ops *ops;
#endif