hv: move vmsix functions to pci_pt.c

vmsix.c originally covers only the ptdev (pass-through device) case, but the
  ACRN hypervisor needs to support purely virtual PCI mediators, such as the
  ivshmem device introduced in this patch set.

  To make the code changes easier to review, they are
  split into several small patches.

  This patch moves most original vmsix code to pci_pt.c
  as they're mixed with ptdev specific operations.

  Subsequent patches will begin the detailed abstraction changes.

Tracked-On: #5407
Signed-off-by: Yonghua Huang <yonghua.huang@intel.com>
Reviewed-by: Li Fei <fei1.li@intel.com>
Reviewed-by: Wang, Yu1 <yu1.wang@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Yonghua Huang 2020-10-10 17:03:49 +08:00 committed by wenlingz
parent 4ade4473ae
commit 1a252b5f18
3 changed files with 250 additions and 247 deletions

View File

@ -27,11 +27,218 @@
* $FreeBSD$
*/
#include <vm.h>
#include <errno.h>
#include <ptdev.h>
#include <assign.h>
#include <vtd.h>
#include <ept.h>
#include <mmu.h>
#include <io.h>
#include <logmsg.h>
#include "vpci_priv.h"
/**
 * Return the host-virtual address of physical MSI-X table entry @index.
 *
 * @pre vdev != NULL
 */
static inline struct msix_table_entry *get_msix_table_entry(const struct pci_vdev *vdev, uint32_t index)
{
	struct msix_table_entry *table;

	/* Table base = HPA of the MSI-X BAR mapping plus the table offset. */
	table = (struct msix_table_entry *)hpa2hva(vdev->msix.mmio_hpa + vdev->msix.table_offset);
	return &table[index];
}
/**
 * @brief Writing MSI-X Capability Structure
 *
 * Emulates a guest write to the MSI-X capability registers: read-only bits
 * are preserved via msix_ro_mask, and the merged Message Control value is
 * propagated to the physical device.
 *
 * @pre vdev != NULL
 * @pre vdev->pdev != NULL
 */
void write_vmsix_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val)
{
	/* Byte-granular read-only mask over the 12-byte MSI-X capability:
	 * a 0 bit marks a guest-writable bit. */
	static const uint8_t msix_ro_mask[12U] = {
		0xffU, 0xffU, 0xffU, 0x3fU, /* Only Function Mask and MSI-X Enable writable */
		0xffU, 0xffU, 0xffU, 0xffU,
		0xffU, 0xffU, 0xffU, 0xffU };
	uint32_t msgctrl, old, ro_mask = ~0U;

	/* Assemble the mask covering the accessed bytes.
	 * NOTE(review): assumes (offset - capoff + bytes) <= 12U — presumably
	 * guaranteed by the config-space dispatcher; verify against the caller. */
	(void)memcpy_s((void *)&ro_mask, bytes, (void *)&msix_ro_mask[offset - vdev->msix.capoff], bytes);

	if (ro_mask != ~0U) {
		/* At least one writable bit in this access: merge old read-only
		 * bits with the guest-supplied writable bits. */
		old = pci_vdev_read_vcfg(vdev, offset, bytes);
		pci_vdev_write_vcfg(vdev, offset, bytes, (old & ro_mask) | (val & ~ro_mask));

		msgctrl = pci_vdev_read_vcfg(vdev, vdev->msix.capoff + PCIR_MSIX_CTRL, 2U);

		/* If MSI-X Enable is being set, make sure INTxDIS bit is set */
		if ((msgctrl & PCIM_MSIXCTRL_MSIX_ENABLE) != 0U) {
			enable_disable_pci_intx(vdev->pdev->bdf, false);
		}
		/* Propagate the merged Message Control to the physical device. */
		pci_pdev_write_cfg(vdev->pdev->bdf, vdev->msix.capoff + PCIR_MSIX_CTRL, 2U, msgctrl);
	}
}
/**
 * Set the Mask bit in the Vector Control word of the physical MSI-X table
 * entry @index, so the vector cannot fire while it is being reprogrammed.
 *
 * @pre vdev != NULL
 */
static void mask_one_msix_vector(const struct pci_vdev *vdev, uint32_t index)
{
	uint32_t vector_control;
	struct msix_table_entry *pentry = get_msix_table_entry(vdev, index);

	/* stac()/clac() bracket the MMIO access (x86 SMAP toggle). */
	stac();
	vector_control = pentry->vector_control | PCIM_MSIX_VCTRL_MASK;
	mmio_write32(vector_control, (void *)&(pentry->vector_control));
	clac();
}
/**
 * Commit the guest-programmed virtual MSI-X table entry @index to hardware.
 *
 * The physical vector is masked first; if the virtual entry is unmasked,
 * the interrupt is translated through the ptirq remapping layer and the
 * resulting address/data (plus the guest's vector control) are written to
 * the physical MSI-X table.
 *
 * @pre vdev != NULL
 * @pre vdev->vpci != NULL
 * @pre vdev->pdev != NULL
 */
static void remap_one_vmsix_entry(const struct pci_vdev *vdev, uint32_t index)
{
	const struct msix_table_entry *ventry;
	struct msix_table_entry *pentry;
	struct msi_info info = {};
	int32_t ret;

	/* Mask the physical vector while it is being reprogrammed. */
	mask_one_msix_vector(vdev, index);

	ventry = &vdev->msix.table_entries[index];
	/* Only program hardware when the guest left the vector unmasked. */
	if ((ventry->vector_control & PCIM_MSIX_VCTRL_MASK) == 0U) {
		info.addr.full = vdev->msix.table_entries[index].addr;
		info.data.full = vdev->msix.table_entries[index].data;

		/* Translate guest MSI address/data to physical values. */
		ret = ptirq_prepare_msix_remap(vpci2vm(vdev->vpci), vdev->bdf.value, vdev->pdev->bdf.value,
				(uint16_t)index, &info, INVALID_IRTE_ID);
		if (ret == 0) {
			/* Write the table entry to the physical structure */
			pentry = get_msix_table_entry(vdev, index);

			/*
			 * PCI 3.0 Spec allows writing to Message Address and Message Upper Address
			 * fields with a single QWORD write, but some hardware can accept 32 bits
			 * write only
			 */
			stac();
			mmio_write32((uint32_t)(info.addr.full), (void *)&(pentry->addr));
			mmio_write32((uint32_t)(info.addr.full >> 32U), (void *)((char *)&(pentry->addr) + 4U));
			mmio_write32(info.data.full, (void *)&(pentry->data));
			/* Vector control is written last — this may unmask the vector. */
			mmio_write32(vdev->msix.table_entries[index].vector_control, (void *)&(pentry->vector_control));
			clac();
		}
	}
}
/**
 * @brief Emulate a guest MMIO read/write of the virtual MSI-X table.
 *
 * Reads are served from the cached virtual table entries. Writes update the
 * virtual entry and then trigger a remap of the corresponding physical entry
 * (either the vMSI-X-on-MSI path or the regular pass-through path).
 *
 * @param vdev   Target virtual PCI device.
 * @param mmio   MMIO request; mmio->value is filled on reads, consumed on writes.
 * @param offset Access offset relative to the MSI-X BAR mapping base.
 *
 * @pre vdev != NULL
 * @pre mmio != NULL
 */
static void rw_vmsix_table(struct pci_vdev *vdev, struct mmio_request *mmio, uint32_t offset)
{
	struct msix_table_entry *entry;
	uint32_t entry_offset, table_offset, index;

	/* Find out which entry it's accessing */
	table_offset = offset - vdev->msix.table_offset;
	index = table_offset / MSIX_TABLE_ENTRY_SIZE;

	if (index < vdev->msix.table_count) {
		entry = &vdev->msix.table_entries[index];
		entry_offset = table_offset % MSIX_TABLE_ENTRY_SIZE;

		if (mmio->direction == REQUEST_READ) {
			/* Cast to char * for byte-granular arithmetic: pointer
			 * arithmetic on void * is a GNU extension, not ISO C. */
			(void)memcpy_s(&mmio->value, (size_t)mmio->size,
				(void *)((char *)entry + entry_offset), (size_t)mmio->size);
		} else {
			/* Only DWORD and QWORD are permitted */
			if ((mmio->size == 4U) || (mmio->size == 8U)) {
				/* Write to pci_vdev */
				(void)memcpy_s((void *)((char *)entry + entry_offset), (size_t)mmio->size,
					&mmio->value, (size_t)mmio->size);
				if (vdev->msix.is_vmsix_on_msi) {
					remap_one_vmsix_entry_on_msi(vdev, index);
				} else {
					remap_one_vmsix_entry(vdev, index);
				}
			} else {
				pr_err("%s, Only DWORD and QWORD are permitted", __func__);
			}
		}
	} else {
		pr_err("%s, invalid arguments %lx - %lx", __func__, mmio->value, mmio->address);
	}
}
/**
 * MMIO emulation handler for the region containing the MSI-X table.
 *
 * Accesses hitting the table itself are emulated via rw_vmsix_table();
 * other accesses in the range (e.g. the PBA) either return 0 (vMSI-X on
 * MSI) or are passed through to the physical device registers.
 *
 * @return 0 on success, -EINVAL for a malformed access, -EFAULT when the
 *         device is currently assigned to another guest.
 *
 * @pre io_req != NULL
 * @pre handler_private_data != NULL
 */
static int32_t vmsix_handle_table_mmio_access(struct io_request *io_req, void *handler_private_data)
{
	struct mmio_request *mmio = &io_req->reqs.mmio;
	struct pci_vdev *vdev;
	int32_t ret = 0;
	uint64_t offset;
	void *hva;

	vdev = (struct pci_vdev *)handler_private_data;
	/* This device has not been assigned to another OS */
	if (vdev->user == vdev) {
		offset = mmio->address - vdev->msix.mmio_gpa;

		if (msixtable_access(vdev, (uint32_t)offset)) {
			/* The access hits the MSI-X table itself: emulate it. */
			rw_vmsix_table(vdev, mmio, (uint32_t)offset);
		} else if (vdev->msix.is_vmsix_on_msi) {
			/* According to PCI spec, PBA is read-only.
			 * Don't emulate PBA according to the device status, just return 0.
			 */
			if (mmio->direction == REQUEST_READ) {
				mmio->value = 0UL;
			} else {
				/* Guest write to the read-only PBA: reject. */
				ret = -EINVAL;
			}
		} else {
			/* Pass the access through to the physical registers. */
			hva = hpa2hva(vdev->msix.mmio_hpa + offset);

			/* Only DWORD and QWORD are permitted */
			if ((mmio->size == 4U) || (mmio->size == 8U)) {
				if (hva != NULL) {
					stac();
					/* MSI-X PBA and Capability Table could be in the same range */
					if (mmio->direction == REQUEST_READ) {
						/* mmio->size is either 4U or 8U */
						if (mmio->size == 4U) {
							mmio->value = (uint64_t)mmio_read32((const void *)hva);
						} else {
							mmio->value = mmio_read64((const void *)hva);
						}
					} else {
						/* mmio->size is either 4U or 8U */
						if (mmio->size == 4U) {
							mmio_write32((uint32_t)(mmio->value), (void *)hva);
						} else {
							mmio_write64(mmio->value, (void *)hva);
						}
					}
					clac();
				}
			} else {
				pr_err("%s, Only DWORD and QWORD are permitted", __func__);
				ret = -EINVAL;
			}
		}
	} else {
		ret = -EFAULT;
	}

	return ret;
}
/*
* @pre vdev != NULL
* @pre vdev->vpci != NULL
@ -326,6 +533,41 @@ static void init_bars(struct pci_vdev *vdev, bool is_sriov_bar)
}
}
/**
 * Initialize the virtual MSI-X capability: copy the capability layout
 * (offset, length, table BAR/offset/count) from the physical device and
 * mirror the raw capability bytes into the virtual config space.
 *
 * @pre vdev != NULL
 * @pre vdev->pdev != NULL
 */
void init_vmsix(struct pci_vdev *vdev)
{
	struct pci_pdev *pdev = vdev->pdev;

	vdev->msix.capoff = pdev->msix.capoff;
	vdev->msix.caplen = pdev->msix.caplen;
	vdev->msix.table_bar = pdev->msix.table_bar;
	vdev->msix.table_offset = pdev->msix.table_offset;
	vdev->msix.table_count = pdev->msix.table_count;

	if (has_msix_cap(vdev)) {
		/* Seed the virtual config space with the physical capability bytes. */
		(void)memcpy_s((void *)&vdev->cfgdata.data_8[pdev->msix.capoff], pdev->msix.caplen,
			(void *)&pdev->msix.cap[0U], pdev->msix.caplen);
	}
}
/**
 * Tear down virtual MSI-X: remove all physical MSI-X remappings for this
 * device, clear the cached virtual table entries, and reset the
 * vMSI-X-on-MSI programmed flag.
 *
 * @pre vdev != NULL
 * @pre vdev->vpci != NULL
 */
void deinit_vmsix(struct pci_vdev *vdev)
{
	if (has_msix_cap(vdev)) {
		if (vdev->msix.table_count != 0U) {
			ptirq_remove_msix_remapping(vpci2vm(vdev->vpci), vdev->pdev->bdf.value, vdev->msix.table_count);
			(void)memset((void *)&vdev->msix.table_entries, 0U, sizeof(vdev->msix.table_entries));
			vdev->msix.is_vmsix_on_msi_programmed = false;
		}
	}
}
void vdev_pt_hide_sriov_cap(struct pci_vdev *vdev)
{
uint32_t pre_pos = vdev->pdev->sriov.pre_pos;

View File

@ -29,88 +29,12 @@
#include <vm.h>
#include <errno.h>
#include <ptdev.h>
#include <assign.h>
#include <vpci.h>
#include <io.h>
#include <ept.h>
#include <mmu.h>
#include <logmsg.h>
#include <vtd.h>
#include "vpci_priv.h"
/**
 * Check whether @offset (relative to the MSI-X BAR mapping base) falls
 * inside the MSI-X table region:
 * [table_offset, table_offset + table_count * MSIX_TABLE_ENTRY_SIZE).
 *
 * @pre vdev != NULL
 */
static inline bool msixtable_access(const struct pci_vdev *vdev, uint32_t offset)
{
	return in_range(offset, vdev->msix.table_offset, vdev->msix.table_count * MSIX_TABLE_ENTRY_SIZE);
}
/**
 * Return the host-virtual address of physical MSI-X table entry @index.
 *
 * @pre vdev != NULL
 */
static inline struct msix_table_entry *get_msix_table_entry(const struct pci_vdev *vdev, uint32_t index)
{
	/* Table base = HPA of the MSI-X BAR mapping plus the table offset. */
	void *hva = hpa2hva(vdev->msix.mmio_hpa + vdev->msix.table_offset);

	return ((struct msix_table_entry *)hva + index);
}
/**
 * Set the Mask bit in the Vector Control word of the physical MSI-X table
 * entry @index, preventing the vector from firing while reprogrammed.
 *
 * @pre vdev != NULL
 */
static void mask_one_msix_vector(const struct pci_vdev *vdev, uint32_t index)
{
	uint32_t vector_control;
	struct msix_table_entry *pentry = get_msix_table_entry(vdev, index);

	/* stac()/clac() bracket the MMIO access (x86 SMAP toggle). */
	stac();
	vector_control = pentry->vector_control | PCIM_MSIX_VCTRL_MASK;
	mmio_write32(vector_control, (void *)&(pentry->vector_control));
	clac();
}
/**
 * Commit the guest-programmed virtual MSI-X table entry @index to hardware:
 * mask the physical vector, translate the guest MSI address/data through
 * the ptirq remapping layer, then write the translated entry out.
 *
 * @pre vdev != NULL
 * @pre vdev->vpci != NULL
 * @pre vdev->pdev != NULL
 */
static void remap_one_vmsix_entry(const struct pci_vdev *vdev, uint32_t index)
{
	const struct msix_table_entry *ventry;
	struct msix_table_entry *pentry;
	struct msi_info info = {};
	int32_t ret;

	/* Mask the physical vector while it is being reprogrammed. */
	mask_one_msix_vector(vdev, index);

	ventry = &vdev->msix.table_entries[index];
	/* Only program hardware when the guest left the vector unmasked. */
	if ((ventry->vector_control & PCIM_MSIX_VCTRL_MASK) == 0U) {
		info.addr.full = vdev->msix.table_entries[index].addr;
		info.data.full = vdev->msix.table_entries[index].data;

		ret = ptirq_prepare_msix_remap(vpci2vm(vdev->vpci), vdev->bdf.value, vdev->pdev->bdf.value,
				(uint16_t)index, &info, INVALID_IRTE_ID);
		if (ret == 0) {
			/* Write the table entry to the physical structure */
			pentry = get_msix_table_entry(vdev, index);

			/*
			 * PCI 3.0 Spec allows writing to Message Address and Message Upper Address
			 * fields with a single QWORD write, but some hardware can accept 32 bits
			 * write only
			 */
			stac();
			mmio_write32((uint32_t)(info.addr.full), (void *)&(pentry->addr));
			mmio_write32((uint32_t)(info.addr.full >> 32U), (void *)((char *)&(pentry->addr) + 4U));
			mmio_write32(info.data.full, (void *)&(pentry->data));
			/* Vector control is written last — this may unmask the vector. */
			mmio_write32(vdev->msix.table_entries[index].vector_control, (void *)&(pentry->vector_control));
			clac();
		}
	}
}
/**
* @pre vdev != NULL
*/
@ -119,173 +43,3 @@ void read_vmsix_cap_reg(const struct pci_vdev *vdev, uint32_t offset, uint32_t b
/* For PIO access, we emulate Capability Structures only */
*val = pci_vdev_read_vcfg(vdev, offset, bytes);
}
/**
 * @brief Writing MSI-X Capability Structure
 *
 * Emulates a guest write to the MSI-X capability registers: read-only bits
 * are preserved via msix_ro_mask, and the merged Message Control value is
 * propagated to the physical device.
 *
 * @pre vdev != NULL
 * @pre vdev->pdev != NULL
 */
void write_vmsix_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val)
{
	/* Byte-granular read-only mask over the 12-byte MSI-X capability:
	 * a 0 bit marks a guest-writable bit. */
	static const uint8_t msix_ro_mask[12U] = {
		0xffU, 0xffU, 0xffU, 0x3fU, /* Only Function Mask and MSI-X Enable writable */
		0xffU, 0xffU, 0xffU, 0xffU,
		0xffU, 0xffU, 0xffU, 0xffU };
	uint32_t msgctrl, old, ro_mask = ~0U;

	/* Assemble the mask covering the accessed bytes.
	 * NOTE(review): assumes (offset - capoff + bytes) <= 12U — presumably
	 * guaranteed by the config-space dispatcher; verify against the caller. */
	(void)memcpy_s((void *)&ro_mask, bytes, (void *)&msix_ro_mask[offset - vdev->msix.capoff], bytes);

	if (ro_mask != ~0U) {
		/* Merge old read-only bits with the guest-supplied writable bits. */
		old = pci_vdev_read_vcfg(vdev, offset, bytes);
		pci_vdev_write_vcfg(vdev, offset, bytes, (old & ro_mask) | (val & ~ro_mask));

		msgctrl = pci_vdev_read_vcfg(vdev, vdev->msix.capoff + PCIR_MSIX_CTRL, 2U);

		/* If MSI-X Enable is being set, make sure INTxDIS bit is set */
		if ((msgctrl & PCIM_MSIXCTRL_MSIX_ENABLE) != 0U) {
			enable_disable_pci_intx(vdev->pdev->bdf, false);
		}
		/* Propagate the merged Message Control to the physical device. */
		pci_pdev_write_cfg(vdev->pdev->bdf, vdev->msix.capoff + PCIR_MSIX_CTRL, 2U, msgctrl);
	}
}
/**
 * @brief Emulate a guest MMIO read/write of the virtual MSI-X table.
 *
 * Reads are served from the cached virtual table entries. Writes update the
 * virtual entry and then trigger a remap of the corresponding physical entry
 * (either the vMSI-X-on-MSI path or the regular pass-through path).
 *
 * @param vdev   Target virtual PCI device.
 * @param mmio   MMIO request; mmio->value is filled on reads, consumed on writes.
 * @param offset Access offset relative to the MSI-X BAR mapping base.
 *
 * @pre vdev != NULL
 * @pre mmio != NULL
 */
static void rw_vmsix_table(struct pci_vdev *vdev, struct mmio_request *mmio, uint32_t offset)
{
	struct msix_table_entry *entry;
	uint32_t entry_offset, table_offset, index;

	/* Find out which entry it's accessing */
	table_offset = offset - vdev->msix.table_offset;
	index = table_offset / MSIX_TABLE_ENTRY_SIZE;

	if (index < vdev->msix.table_count) {
		entry = &vdev->msix.table_entries[index];
		entry_offset = table_offset % MSIX_TABLE_ENTRY_SIZE;

		if (mmio->direction == REQUEST_READ) {
			/* Cast to char * for byte-granular arithmetic: pointer
			 * arithmetic on void * is a GNU extension, not ISO C. */
			(void)memcpy_s(&mmio->value, (size_t)mmio->size,
				(void *)((char *)entry + entry_offset), (size_t)mmio->size);
		} else {
			/* Only DWORD and QWORD are permitted */
			if ((mmio->size == 4U) || (mmio->size == 8U)) {
				/* Write to pci_vdev */
				(void)memcpy_s((void *)((char *)entry + entry_offset), (size_t)mmio->size,
					&mmio->value, (size_t)mmio->size);
				if (vdev->msix.is_vmsix_on_msi) {
					remap_one_vmsix_entry_on_msi(vdev, index);
				} else {
					remap_one_vmsix_entry(vdev, index);
				}
			} else {
				pr_err("%s, Only DWORD and QWORD are permitted", __func__);
			}
		}
	} else {
		pr_err("%s, invalid arguments %lx - %lx", __func__, mmio->value, mmio->address);
	}
}
/**
 * MMIO emulation handler for the region containing the MSI-X table.
 *
 * Accesses hitting the table itself are emulated via rw_vmsix_table();
 * other accesses in the range (e.g. the PBA) either return 0 (vMSI-X on
 * MSI) or are passed through to the physical device registers.
 *
 * @return 0 on success, -EINVAL for a malformed access, -EFAULT when the
 *         device is currently assigned to another guest.
 *
 * @pre io_req != NULL
 * @pre handler_private_data != NULL
 */
int32_t vmsix_handle_table_mmio_access(struct io_request *io_req, void *handler_private_data)
{
	struct mmio_request *mmio = &io_req->reqs.mmio;
	struct pci_vdev *vdev;
	int32_t ret = 0;
	uint64_t offset;
	void *hva;

	vdev = (struct pci_vdev *)handler_private_data;
	/* This device has not been assigned to another OS */
	if (vdev->user == vdev) {
		offset = mmio->address - vdev->msix.mmio_gpa;

		if (msixtable_access(vdev, (uint32_t)offset)) {
			/* The access hits the MSI-X table itself: emulate it. */
			rw_vmsix_table(vdev, mmio, (uint32_t)offset);
		} else if (vdev->msix.is_vmsix_on_msi) {
			/* According to PCI spec, PBA is read-only.
			 * Don't emulate PBA according to the device status, just return 0.
			 */
			if (mmio->direction == REQUEST_READ) {
				mmio->value = 0UL;
			} else {
				/* Guest write to the read-only PBA: reject. */
				ret = -EINVAL;
			}
		} else {
			/* Pass the access through to the physical registers. */
			hva = hpa2hva(vdev->msix.mmio_hpa + offset);

			/* Only DWORD and QWORD are permitted */
			if ((mmio->size == 4U) || (mmio->size == 8U)) {
				if (hva != NULL) {
					stac();
					/* MSI-X PBA and Capability Table could be in the same range */
					if (mmio->direction == REQUEST_READ) {
						/* mmio->size is either 4U or 8U */
						if (mmio->size == 4U) {
							mmio->value = (uint64_t)mmio_read32((const void *)hva);
						} else {
							mmio->value = mmio_read64((const void *)hva);
						}
					} else {
						/* mmio->size is either 4U or 8U */
						if (mmio->size == 4U) {
							mmio_write32((uint32_t)(mmio->value), (void *)hva);
						} else {
							mmio_write64(mmio->value, (void *)hva);
						}
					}
					clac();
				}
			} else {
				pr_err("%s, Only DWORD and QWORD are permitted", __func__);
				ret = -EINVAL;
			}
		}
	} else {
		ret = -EFAULT;
	}

	return ret;
}
/**
 * Initialize the virtual MSI-X capability: copy the capability layout
 * (offset, length, table BAR/offset/count) from the physical device and
 * mirror the raw capability bytes into the virtual config space.
 *
 * @pre vdev != NULL
 * @pre vdev->pdev != NULL
 */
void init_vmsix(struct pci_vdev *vdev)
{
	struct pci_pdev *pdev = vdev->pdev;

	vdev->msix.capoff = pdev->msix.capoff;
	vdev->msix.caplen = pdev->msix.caplen;
	vdev->msix.table_bar = pdev->msix.table_bar;
	vdev->msix.table_offset = pdev->msix.table_offset;
	vdev->msix.table_count = pdev->msix.table_count;

	if (has_msix_cap(vdev)) {
		/* Seed the virtual config space with the physical capability bytes. */
		(void)memcpy_s((void *)&vdev->cfgdata.data_8[pdev->msix.capoff], pdev->msix.caplen,
			(void *)&pdev->msix.cap[0U], pdev->msix.caplen);
	}
}
/**
 * Tear down virtual MSI-X: remove all physical MSI-X remappings for this
 * device, clear the cached virtual table entries, and reset the
 * vMSI-X-on-MSI programmed flag.
 *
 * @pre vdev != NULL
 * @pre vdev->vpci != NULL
 */
void deinit_vmsix(struct pci_vdev *vdev)
{
	if (has_msix_cap(vdev)) {
		if (vdev->msix.table_count != 0U) {
			ptirq_remove_msix_remapping(vpci2vm(vdev->vpci), vdev->pdev->bdf.value, vdev->msix.table_count);
			(void)memset((void *)&vdev->msix.table_entries, 0U, sizeof(vdev->msix.table_entries));
			vdev->msix.is_vmsix_on_msi_programmed = false;
		}
	}
}

View File

@ -64,6 +64,14 @@ static inline bool msixcap_access(const struct pci_vdev *vdev, uint32_t offset)
return (has_msix_cap(vdev) && in_range(offset, vdev->msix.capoff, vdev->msix.caplen));
}
/**
 * Check whether @offset (relative to the MSI-X BAR mapping base) falls
 * inside the MSI-X table region:
 * [table_offset, table_offset + table_count * MSIX_TABLE_ENTRY_SIZE).
 *
 * @pre vdev != NULL
 */
static inline bool msixtable_access(const struct pci_vdev *vdev, uint32_t offset)
{
	return in_range(offset, vdev->msix.table_offset, vdev->msix.table_count * MSIX_TABLE_ENTRY_SIZE);
}
/*
* @pre vdev != NULL
*/
@ -137,7 +145,6 @@ void write_vmsi_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes,
void deinit_vmsi(const struct pci_vdev *vdev);
void init_vmsix(struct pci_vdev *vdev);
int32_t vmsix_handle_table_mmio_access(struct io_request *io_req, void *handler_private_data);
void read_vmsix_cap_reg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val);
void write_vmsix_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
void deinit_vmsix(struct pci_vdev *vdev);