HV: use the cached pci device info for sharing mode

Tracked-On: #2431
Signed-off-by: dongshen <dongsheng.x.zhang@intel.com>
Reviewed-by: Anthony Xu <anthony.xu@intel.com>
This commit is contained in:
dongshen 2019-01-24 16:34:30 -08:00 committed by ACRN System Integration
parent e0f9d14011
commit 983b717a61
5 changed files with 97 additions and 156 deletions

View File

@@ -33,6 +33,7 @@
static inline bool msicap_access(const struct pci_vdev *vdev, uint32_t offset)
{
bool ret;
if (vdev->msi.capoff == 0U) {
ret = 0;
} else {
@@ -83,7 +84,8 @@ static int32_t vmsi_remap(const struct pci_vdev *vdev, bool enable)
/* Update MSI Capability structure to physical device */
pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_ADDR, 0x4U, (uint32_t)info.pmsi_addr.full);
if ((msgctrl & PCIM_MSICTRL_64BIT) != 0U) {
pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_ADDR_HIGH, 0x4U, (uint32_t)(info.pmsi_addr.full >> 32U));
pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_ADDR_HIGH, 0x4U,
(uint32_t)(info.pmsi_addr.full >> 32U));
pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_DATA_64BIT, 0x2U, (uint16_t)info.pmsi_data.full);
} else {
pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_DATA, 0x2U, (uint16_t)info.pmsi_data.full);
@@ -166,68 +168,53 @@ static const struct pci_vdev_ops pci_ops_vdev_msi = {
.cfgread = vmsi_cfgread,
};
/* Assemble a uint32_t from the first four bytes of buf (little endian) */
static uint32_t buf_read32(const uint8_t buf[])
{
	uint32_t val = 0U;
	uint32_t i;

	/* Byte i contributes bits [8*i, 8*i+7] of the result */
	for (i = 0U; i < 4U; i++) {
		val |= ((uint32_t)buf[i] << (i << 3U));
	}

	return val;
}
/* Store val into the first four bytes of buf in little-endian order */
static void buf_write32(uint8_t buf[], uint32_t val)
{
	uint32_t i;

	/* Byte i receives bits [8*i, 8*i+7] of val */
	for (i = 0U; i < 4U; i++) {
		buf[i] = (uint8_t)((val >> (i << 3U)) & 0xFFU);
	}
}
void populate_msi_struct(struct pci_vdev *vdev)
{
uint8_t ptr, cap;
uint32_t msgctrl;
uint32_t len, bytes, offset, val;
union pci_bdf pbdf = vdev->pdev.bdf;
struct pci_pdev *pdev = &vdev->pdev;
uint32_t val;
/* Has new Capabilities list? */
if ((pci_pdev_read_cfg(pbdf, PCIR_STATUS, 2U) & PCIM_STATUS_CAPPRESENT) != 0U) {
ptr = (uint8_t)pci_pdev_read_cfg(pbdf, PCIR_CAP_PTR, 1U);
while ((ptr != 0U) && (ptr != 0xFFU)) {
cap = (uint8_t)pci_pdev_read_cfg(pbdf, ptr + PCICAP_ID, 1U);
/* Ignore all other Capability IDs for now */
if ((cap == PCIY_MSI) || (cap == PCIY_MSIX)) {
offset = ptr;
if (cap == PCIY_MSI) {
vdev->msi.capoff = offset;
msgctrl = pci_pdev_read_cfg(pbdf, offset + PCIR_MSI_CTRL, 2U);
/*
* Ignore the 'mask' and 'pending' bits in the MSI capability
* (msgctrl & PCIM_MSICTRL_VECTOR).
* We'll let the guest manipulate them directly.
*/
len = ((msgctrl & PCIM_MSICTRL_64BIT) != 0U) ? 14U : 10U;
vdev->msi.caplen = len;
/* Copy MSI/MSI-X capability struct into virtual device */
if (pdev->msi.capoff != 0U) {
vdev->msi.capoff = pdev->msi.capoff;
vdev->msi.caplen = pdev->msi.caplen;
/* Assign MSI handler for configuration read and write */
add_vdev_handler(vdev, &pci_ops_vdev_msi);
} else {
vdev->msix.capoff = offset;
vdev->msix.caplen = MSIX_CAPLEN;
len = vdev->msix.caplen;
(void)memcpy_s((void *)&vdev->cfgdata.data_8[pdev->msi.capoff], pdev->msi.caplen,
(void *)&pdev->msi.cap[0U], pdev->msi.caplen);
val = buf_read32(&pdev->msi.cap[0U]);
val &= ~((uint32_t)PCIM_MSICTRL_MMC_MASK << 16U);
val &= ~((uint32_t)PCIM_MSICTRL_MME_MASK << 16U);
buf_write32(&vdev->cfgdata.data_8[pdev->msi.capoff], val);
}
if (pdev->msix.capoff != 0U) {
vdev->msix.capoff = pdev->msix.capoff;
vdev->msix.caplen = pdev->msix.caplen;
/* Assign MSI-X handler for configuration read and write */
add_vdev_handler(vdev, &pci_ops_vdev_msix);
}
/* Copy MSI/MSI-X capability struct into virtual device */
while (len > 0U) {
bytes = (len >= 4U) ? 4U : len;
val = pci_pdev_read_cfg(pbdf, offset, bytes);
if ((cap == PCIY_MSI) && (offset == vdev->msi.capoff)) {
/*
* Don't support multiple vector for now,
* Force Multiple Message Enable and Multiple Message
* Capable to 0
*/
val &= ~((uint32_t)PCIM_MSICTRL_MMC_MASK << 16U);
val &= ~((uint32_t)PCIM_MSICTRL_MME_MASK << 16U);
}
pci_vdev_write_cfg(vdev, offset, bytes, val);
len -= bytes;
offset += bytes;
}
}
ptr = (uint8_t)pci_pdev_read_cfg(pbdf, ptr + PCICAP_NEXTPTR, 1U);
}
(void)memcpy_s((void *)&vdev->cfgdata.data_8[pdev->msix.capoff], pdev->msix.caplen,
(void *)&pdev->msix.cap[0U], pdev->msix.caplen);
}
}

View File

@@ -189,7 +189,6 @@ static int32_t vmsix_cfgwrite(struct pci_vdev *vdev, uint32_t offset, uint32_t b
pci_pdev_write_cfg(vdev->pdev.bdf, offset, 2U, val);
}
}
ret = 0;
} else {
ret = -ENODEV;
@@ -230,6 +229,7 @@ static void vmsix_table_rw(struct pci_vdev *vdev, struct mmio_request *mmio, uin
*/
if (entry_offset < offsetof(struct msix_table_entry, data)) {
uint64_t qword_mask = ~0UL;
if (mmio->size == 4U) {
qword_mask = (entry_offset == 0U) ?
0x00000000FFFFFFFFUL : 0xFFFFFFFF00000000UL;
@@ -311,67 +311,18 @@ static int32_t vmsix_table_mmio_access_handler(struct io_request *io_req, void *
return ret;
}
/*
 * Compute the MMIO location (mmio_hva, mmio_gpa, mmio_size) of the BAR that
 * contains this device's MSI-X table.
 *
 * The size is discovered with the standard PCI sizing probe: write all ones
 * to the BAR register(s), read back the address mask, and isolate its lowest
 * set bit. The original BAR contents are restored before returning.
 *
 * NOTE(review): this block is the removed side of a diff; it read the BAR
 * from hardware each time, which the commit replaces with cached pdev info.
 */
static void decode_msix_table_bar(struct pci_vdev *vdev)
{
uint32_t bir = vdev->msix.table_bar;
union pci_bdf pbdf = vdev->pdev.bdf;
uint64_t base, size;
uint32_t bar_lo, bar_hi, val32;
bar_lo = pci_pdev_read_cfg(pbdf, pci_bar_offset(bir), 4U);
/* Only a memory BAR can hold an MSI-X table; I/O BARs are rejected below */
if ((bar_lo & PCIM_BAR_SPACE) != PCIM_BAR_IO_SPACE) {
/* Get the base address */
base = (uint64_t)bar_lo & PCIM_BAR_MEM_BASE;
if ((bar_lo & PCIM_BAR_MEM_TYPE) == PCIM_BAR_MEM_64) {
/* 64-bit memory BAR: the next BAR register holds the upper 32 bits */
bar_hi = pci_pdev_read_cfg(pbdf, pci_bar_offset(bir + 1U), 4U);
base |= ((uint64_t)bar_hi << 32U);
}
/* NOTE(review): assumes hpa2hva/sos_vm_hpa2gpa translate the host physical
 * base for hypervisor and SOS views respectively — confirm against their
 * definitions elsewhere in the tree. */
vdev->msix.mmio_hva = (uint64_t)hpa2hva(base);
vdev->msix.mmio_gpa = sos_vm_hpa2gpa(base);
/* Sizing the BAR */
size = 0U;
if (((bar_lo & PCIM_BAR_MEM_TYPE) == PCIM_BAR_MEM_64) && (bir < (PCI_BAR_COUNT - 1U))) {
/* Probe the upper 32 bits: write all ones, read back the size mask */
pci_pdev_write_cfg(pbdf, pci_bar_offset(bir + 1U), 4U, ~0U);
size = (uint64_t)pci_pdev_read_cfg(pbdf, pci_bar_offset(bir + 1U), 4U);
size <<= 32U;
}
/* Probe the lower 32 bits the same way, dropping the BAR flag bits */
pci_pdev_write_cfg(pbdf, pci_bar_offset(bir), 4U, ~0U);
val32 = pci_pdev_read_cfg(pbdf, pci_bar_offset(bir), 4U);
size |= ((uint64_t)val32 & PCIM_BAR_MEM_BASE);
/* The lowest set bit of the read-back mask is the BAR size (power of two) */
vdev->msix.mmio_size = size & ~(size - 1U);
/* Restore the BAR */
pci_pdev_write_cfg(pbdf, pci_bar_offset(bir), 4U, bar_lo);
if ((bar_lo & PCIM_BAR_MEM_TYPE) == PCIM_BAR_MEM_64) {
pci_pdev_write_cfg(pbdf, pci_bar_offset(bir + 1U), 4U, bar_hi);
}
} else {
/* I/O bar, should never happen */
pr_err("PCI device (%x) has MSI-X Table at IO BAR", vdev->vbdf.value);
}
}
static int32_t vmsix_init(struct pci_vdev *vdev)
{
uint32_t msgctrl;
uint32_t table_info, i;
uint32_t i;
uint64_t addr_hi, addr_lo;
struct pci_msix *msix = &vdev->msix;
struct pci_pdev *pdev = &vdev->pdev;
struct pci_bar *bar;
int32_t ret;
msgctrl = pci_pdev_read_cfg(vdev->pdev.bdf, vdev->msix.capoff + PCIR_MSIX_CTRL, 2U);
/* Read Table Offset and Table BIR */
table_info = pci_pdev_read_cfg(vdev->pdev.bdf, msix->capoff + PCIR_MSIX_TABLE, 4U);
msix->table_bar = table_info & PCIM_MSIX_BIR_MASK;
msix->table_offset = table_info & ~PCIM_MSIX_BIR_MASK;
msix->table_count = (msgctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1U;
msix->table_bar = pdev->msix.table_bar;
msix->table_offset = pdev->msix.table_offset;
msix->table_count = pdev->msix.table_count;
if (msix->table_bar < (PCI_BAR_COUNT - 1U)) {
/* Mask all table entries */
@@ -381,7 +332,12 @@ static int32_t vmsix_init(struct pci_vdev *vdev)
msix->tables[i].data = 0U;
}
decode_msix_table_bar(vdev);
bar = &pdev->bar[msix->table_bar];
if (bar != NULL) {
vdev->msix.mmio_hva = (uint64_t)hpa2hva(bar->base);
vdev->msix.mmio_gpa = sos_vm_hpa2gpa(bar->base);
vdev->msix.mmio_size = bar->size;
}
if (msix->mmio_gpa != 0U) {
/*

View File

@@ -102,31 +102,32 @@ static void sharing_mode_cfgwrite(__unused struct acrn_vpci *vpci, union pci_bdf
}
}
static struct pci_vdev *alloc_pci_vdev(const struct acrn_vm *vm, union pci_bdf bdf)
static struct pci_vdev *alloc_pci_vdev(const struct acrn_vm *vm, const struct pci_pdev *pdev_ref)
{
struct pci_vdev *vdev;
struct pci_vdev *vdev = NULL;
if (num_pci_vdev < CONFIG_MAX_PCI_DEV_NUM) {
vdev = &sharing_mode_vdev_array[num_pci_vdev];
num_pci_vdev++;
/* vbdf equals to pbdf otherwise remapped */
vdev->vbdf = bdf;
if ((vm != NULL) && (vdev != NULL) && (pdev_ref != NULL)) {
vdev->vpci = &vm->vpci;
vdev->pdev.bdf = bdf;
} else {
vdev = NULL;
/* vbdf equals to pbdf otherwise remapped */
vdev->vbdf = pdev_ref->bdf;
(void)memcpy_s((void *)&vdev->pdev, sizeof(struct pci_pdev),
(const void *)pdev_ref, sizeof(struct pci_pdev));
}
}
return vdev;
}
static void enumerate_pci_dev(uint16_t pbdf, const void *cb_data)
static void init_vdev_for_pdev(const struct pci_pdev *pdev, const void *cb_data)
{
const struct acrn_vm *vm = (const struct acrn_vm *)cb_data;
struct pci_vdev *vdev;
vdev = alloc_pci_vdev(vm, (union pci_bdf)pbdf);
vdev = alloc_pci_vdev(vm, pdev);
if (vdev != NULL) {
populate_msi_struct(vdev);
}
@@ -145,12 +146,8 @@ static int32_t sharing_mode_vpci_init(const struct acrn_vm *vm)
if (!is_sos_vm(vm)) {
ret = -ENODEV;
} else {
/* Initialize PCI vdev array */
num_pci_vdev = 0U;
(void)memset((void *)sharing_mode_vdev_array, 0U, sizeof(sharing_mode_vdev_array));
/* build up vdev array for sos_vm */
pci_scan_bus(enumerate_pci_dev, vm);
/* Build up vdev array for sos_vm */
pci_pdev_foreach(init_vdev_for_pdev, vm);
for (i = 0U; i < num_pci_vdev; i++) {
vdev = &sharing_mode_vdev_array[i];

View File

@@ -36,6 +36,8 @@ static spinlock_t pci_device_lock;
static uint32_t num_pci_pdev;
static struct pci_pdev pci_pdev_array[CONFIG_MAX_PCI_DEV_NUM];
static void init_pdev(uint16_t pbdf);
static uint32_t pci_pdev_calc_address(union pci_bdf bdf, uint32_t offset)
{
@@ -122,7 +124,7 @@ void enable_disable_pci_intx(union pci_bdf bdf, bool enable)
#define BUS_SCAN_SKIP 0U
#define BUS_SCAN_PENDING 1U
#define BUS_SCAN_COMPLETE 2U
void pci_scan_bus(pci_enumeration_cb cb_func, const void *cb_data)
void init_pci_pdev_list(void)
{
union pci_bdf pbdf;
uint8_t hdr_type, secondary_bus, dev, func;
@@ -163,9 +165,7 @@ void pci_scan_bus(pci_enumeration_cb cb_func, const void *cb_data)
continue;
}
if (cb_func != NULL) {
cb_func(pbdf.value, cb_data);
}
init_pdev(pbdf.value);
hdr_type = (uint8_t)pci_pdev_read_cfg(pbdf, PCIR_HDRTYPE, 1U);
if ((hdr_type & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE) {
@@ -380,22 +380,24 @@ static void fill_pdev(uint16_t pbdf, struct pci_pdev *pdev)
}
}
static void init_pdev(uint16_t pbdf, __unused const void *cb_data)
static void init_pdev(uint16_t pbdf)
{
static struct pci_pdev *curpdev = NULL;
if (num_pci_pdev < CONFIG_MAX_PCI_DEV_NUM) {
curpdev = &pci_pdev_array[num_pci_pdev];
fill_pdev(pbdf, &pci_pdev_array[num_pci_pdev]);
num_pci_pdev++;
fill_pdev(pbdf, curpdev);
} else {
pr_err("%s, failed to alloc pci_pdev!\n", __func__);
}
}
void init_pci_pdev_list(void)
void pci_pdev_foreach(pci_pdev_enumeration_cb cb_func, const void *ctx)
{
/* Build up pdev array */
pci_scan_bus(init_pdev, NULL);
uint32_t idx;
for (idx = 0U; idx < num_pci_pdev; idx++) {
if (cb_func != NULL) {
cb_func(&pci_pdev_array[idx], ctx);
}
}
}

View File

@@ -179,8 +179,7 @@ struct pci_pdev {
struct pci_msix_cap msix;
};
typedef void (*pci_enumeration_cb)(uint16_t pbdf, const void *data);
typedef void (*pci_pdev_enumeration_cb)(const struct pci_pdev *pdev, const void *data);
static inline uint32_t pci_bar_offset(uint32_t idx)
{
@@ -225,7 +224,7 @@ uint32_t pci_pdev_read_cfg(union pci_bdf bdf, uint32_t offset, uint32_t bytes);
void pci_pdev_write_cfg(union pci_bdf bdf, uint32_t offset, uint32_t bytes, uint32_t val);
void enable_disable_pci_intx(union pci_bdf bdf, bool enable);
void pci_scan_bus(pci_enumeration_cb cb, const void *cb_data);
void pci_pdev_foreach(pci_pdev_enumeration_cb cb, const void *ctx);
void init_pci_pdev_list(void);