diff --git a/hypervisor/Makefile b/hypervisor/Makefile
index 211a2d3d0..678675798 100644
--- a/hypervisor/Makefile
+++ b/hypervisor/Makefile
@@ -294,6 +294,7 @@ VP_DM_C_SRCS += dm/vpci/vpci_bridge.c
 VP_DM_C_SRCS += dm/vpci/pci_pt.c
 VP_DM_C_SRCS += dm/vpci/vmsi.c
 VP_DM_C_SRCS += dm/vpci/vmsix.c
+VP_DM_C_SRCS += dm/vpci/vsriov.c
 VP_DM_C_SRCS += arch/x86/guest/vlapic.c
 VP_DM_C_SRCS += arch/x86/guest/pm.c
 VP_DM_C_SRCS += arch/x86/guest/assign.c
diff --git a/hypervisor/dm/vpci/vpci.c b/hypervisor/dm/vpci/vpci.c
index 42b167b5f..aa2f2c10d 100644
--- a/hypervisor/dm/vpci/vpci.c
+++ b/hypervisor/dm/vpci/vpci.c
@@ -357,6 +357,7 @@ static void vpci_init_pt_dev(struct pci_vdev *vdev)
 	 */
 	init_vmsi(vdev);
 	init_vmsix(vdev);
+	init_vsriov(vdev);
 	init_vdev_pt(vdev);
 
 	assign_vdev_pt_iommu_domain(vdev);
@@ -381,6 +382,8 @@ static int32_t vpci_write_pt_dev_cfg(struct pci_vdev *vdev, uint32_t offset,
 		vmsi_write_cfg(vdev, offset, bytes, val);
 	} else if (msixcap_access(vdev, offset)) {
 		vmsix_write_cfg(vdev, offset, bytes, val);
+	} else if (sriovcap_access(vdev, offset)) {
+		write_sriov_cap_reg(vdev, offset, bytes, val);
 	} else if (offset == PCIR_COMMAND) {
 		vdev_pt_write_command(vdev, (bytes > 2U) ? 2U : bytes, (uint16_t)val);
 	} else {
@@ -410,6 +413,8 @@ static int32_t vpci_read_pt_dev_cfg(const struct pci_vdev *vdev, uint32_t offset
 		vmsi_read_cfg(vdev, offset, bytes, val);
 	} else if (msixcap_access(vdev, offset)) {
 		vmsix_read_cfg(vdev, offset, bytes, val);
+	} else if (sriovcap_access(vdev, offset)) {
+		read_sriov_cap_reg(vdev, offset, bytes, val);
 	} else {
 		if (is_postlaunched_vm(vdev->vpci->vm) &&
 			in_range(offset, PCIR_INTERRUPT_LINE, 4U)) {
diff --git a/hypervisor/dm/vpci/vpci_priv.h b/hypervisor/dm/vpci/vpci_priv.h
index db9acb25f..47e7f991c 100644
--- a/hypervisor/dm/vpci/vpci_priv.h
+++ b/hypervisor/dm/vpci/vpci_priv.h
@@ -101,6 +101,22 @@ static inline bool msixcap_access(const struct pci_vdev *vdev, uint32_t offset)
 	return (has_msix_cap(vdev) && in_range(offset, vdev->msix.capoff, vdev->msix.caplen));
 }
 
+/*
+ * @pre vdev != NULL
+ */
+static inline bool has_sriov_cap(const struct pci_vdev *vdev)
+{
+	return (vdev->sriov.capoff != 0U);
+}
+
+/*
+ * @pre vdev != NULL
+ */
+static inline bool sriovcap_access(const struct pci_vdev *vdev, uint32_t offset)
+{
+	return (has_sriov_cap(vdev) && in_range(offset, vdev->sriov.capoff, vdev->sriov.caplen));
+}
+
 /**
  * @pre vdev != NULL
  */
@@ -140,6 +156,10 @@ void vmsix_read_cfg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes
 void vmsix_write_cfg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
 void deinit_vmsix(const struct pci_vdev *vdev);
 
+void init_vsriov(struct pci_vdev *vdev);
+void read_sriov_cap_reg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val);
+void write_sriov_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
+
 uint32_t pci_vdev_read_cfg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes);
 void pci_vdev_write_cfg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
 
diff --git a/hypervisor/dm/vpci/vsriov.c b/hypervisor/dm/vpci/vsriov.c
new file mode 100644
index 000000000..fa48ac671
--- /dev/null
+++ b/hypervisor/dm/vpci/vsriov.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2011 NetApp, Inc.
+ * Copyright (c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <vm.h>
+#include <vpci.h>
+
+/**
+ * @pre vdev != NULL
+ * @pre vdev->pdev != NULL
+ */
+void init_vsriov(struct pci_vdev *vdev)
+{
+	struct pci_pdev *pdev = vdev->pdev;
+
+	vdev->sriov.capoff = pdev->sriov.capoff;
+	vdev->sriov.caplen = pdev->sriov.caplen;
+}
+
+/**
+ * @pre vdev != NULL
+ * @pre vdev->pdev != NULL
+ */
+void read_sriov_cap_reg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val)
+{
+	/* no need to do emulation, passthrough to physical device directly */
+	*val = pci_pdev_read_cfg(vdev->pdev->bdf, offset, bytes);
+}
+
+/**
+ * @pre vdev != NULL
+ * @pre vdev->pdev != NULL
+ */
+void write_sriov_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val)
+{
+	/* Needs to intercept VF_ENABLE and add it in next patch. */
+	pci_pdev_write_cfg(vdev->pdev->bdf, offset, bytes, val);
+
+}
diff --git a/hypervisor/include/dm/vpci.h b/hypervisor/include/dm/vpci.h
index bd5483aed..defb075c3 100644
--- a/hypervisor/include/dm/vpci.h
+++ b/hypervisor/include/dm/vpci.h
@@ -69,6 +69,12 @@ struct pci_msix {
 	uint32_t table_count;
 };
 
+/* SRIOV capability structure */
+struct pci_cap_sriov {
+	uint32_t capoff;
+	uint32_t caplen;
+};
+
 union pci_cfgdata {
 	uint8_t data_8[PCIE_CONFIG_SPACE_SIZE];
 	uint16_t data_16[PCIE_CONFIG_SPACE_SIZE >> 1U];
@@ -98,6 +104,7 @@ struct pci_vdev {
 
 	struct pci_msi msi;
 	struct pci_msix msix;
+	struct pci_cap_sriov sriov;
 
 	/* Pointer to corresponding PCI device's vm_config */
 	struct acrn_vm_pci_dev_config *pci_dev_config;