diff --git a/hypervisor/dm/vpci/pci_pt.c b/hypervisor/dm/vpci/pci_pt.c index 9baa20020..c39b31777 100644 --- a/hypervisor/dm/vpci/pci_pt.c +++ b/hypervisor/dm/vpci/pci_pt.c @@ -66,7 +66,7 @@ static void vdev_pt_unmap_msix(struct pci_vdev *vdev) * @pre vdev->vpci != NULL * @pre vdev->vpci->vm != NULL */ -static void vdev_pt_map_msix(struct pci_vdev *vdev, bool hold_lock) +void vdev_pt_map_msix(struct pci_vdev *vdev, bool hold_lock) { struct pci_vbar *vbar; uint64_t addr_hi, addr_lo; @@ -262,10 +262,6 @@ static void init_bars(struct pci_vdev *vdev, bool is_sriov_bar) } pbdf.value = vdev->pdev->bdf.value; - for (offset = 0U; offset < PCI_CFG_HEADER_LENGTH; offset += 4U) { - pci_vdev_write_vcfg(vdev, offset, 4U, pci_pdev_read_cfg(pbdf, offset, 4U)); - } - for (idx = 0U; idx < bar_cnt; idx++) { if (is_sriov_bar) { vbar = &vdev->sriov.vbars[idx]; @@ -375,10 +371,14 @@ static void init_bars(struct pci_vdev *vdev, bool is_sriov_bar) void init_vdev_pt(struct pci_vdev *vdev, bool is_pf_vdev) { uint16_t pci_command; + uint32_t offset; - if (vdev->phyfun != NULL) { - init_sriov_vf_vdev(vdev); - } else { + for (offset = 0U; offset < PCI_CFG_HEADER_LENGTH; offset += 4U) { + pci_vdev_write_vcfg(vdev, offset, 4U, pci_pdev_read_cfg(vdev->pdev->bdf, offset, 4U)); + } + + /* Initialize the vdev BARs except SRIOV VF, VF BARs are initialized directly from create_vf function */ + if (vdev->phyfun == NULL) { init_bars(vdev, is_pf_vdev); if (is_prelaunched_vm(vdev->vpci->vm) && (!is_pf_vdev)) { pci_command = (uint16_t)pci_pdev_read_cfg(vdev->pdev->bdf, PCIR_COMMAND, 2U); diff --git a/hypervisor/dm/vpci/vpci_priv.h b/hypervisor/dm/vpci/vpci_priv.h index d7f37e821..50b24805c 100644 --- a/hypervisor/dm/vpci/vpci_priv.h +++ b/hypervisor/dm/vpci/vpci_priv.h @@ -103,6 +103,7 @@ static inline bool msicap_access(const struct pci_vdev *vdev, uint32_t offset) void init_vdev_pt(struct pci_vdev *vdev, bool is_pf_vdev); void vdev_pt_write_vbar(struct pci_vdev *vdev, uint32_t idx, uint32_t val); +void 
vdev_pt_map_msix(struct pci_vdev *vdev, bool hold_lock); void init_vmsi(struct pci_vdev *vdev); void vmsi_read_cfg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val); @@ -119,7 +120,6 @@ void init_vsriov(struct pci_vdev *vdev); void read_sriov_cap_reg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val); void write_sriov_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val); uint32_t sriov_bar_offset(const struct pci_vdev *vdev, uint32_t bar_idx); -void init_sriov_vf_vdev(struct pci_vdev *vdev); uint32_t pci_vdev_read_vcfg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes); void pci_vdev_write_vcfg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val); diff --git a/hypervisor/dm/vpci/vsriov.c b/hypervisor/dm/vpci/vsriov.c index 257581978..92432c6b3 100644 --- a/hypervisor/dm/vpci/vsriov.c +++ b/hypervisor/dm/vpci/vsriov.c @@ -81,7 +81,7 @@ static void init_sriov_vf_bar(struct pci_vdev *pf_vdev) /** * @pre pf_vdev != NULL */ -static void create_vf(struct pci_vdev *pf_vdev, union pci_bdf vf_bdf) +static void create_vf(struct pci_vdev *pf_vdev, union pci_bdf vf_bdf, uint16_t vf_id) { struct pci_pdev *vf_pdev; struct pci_vdev *vf_vdev = NULL; @@ -112,6 +112,31 @@ static void create_vf(struct pci_vdev *pf_vdev, union pci_bdf vf_bdf) pci_pdev_write_cfg(pf_vdev->bdf, pf_vdev->sriov.capoff + PCIR_SRIOV_CONTROL, 2U, control); pr_err("PF %x:%x.%x can't creat VF, unset VF_ENABLE", pf_vdev->bdf.bits.b, pf_vdev->bdf.bits.d, pf_vdev->bdf.bits.f); + } else { + uint16_t bar_idx; + struct pci_vbar *vf_vbar; + + /* VF bars information from its PF SRIOV capability, no need to access physical device */ + vf_vdev->nr_bars = PCI_BAR_COUNT; + for (bar_idx = 0U; bar_idx < PCI_BAR_COUNT; bar_idx++) { + vf_vbar = &vf_vdev->vbars[bar_idx]; + *vf_vbar = vf_vdev->phyfun->sriov.vbars[bar_idx]; + vf_vbar->base_hpa += (vf_vbar->size * vf_id); + vf_vbar->base = vf_vbar->base_hpa; + if 
(has_msix_cap(vf_vdev) && (bar_idx == vf_vdev->msix.table_bar)) { + vf_vdev->msix.mmio_hpa = vf_vbar->base_hpa; + vf_vdev->msix.mmio_size = vf_vbar->size; + } + /* + * VF BAR values are zero and read-only, according to PCI Express + * Base Specification 4.0, chapter 9.3.4.1.11. + */ + pci_vdev_write_vcfg(vf_vdev, pci_bar_offset(bar_idx), 4U, 0U); + } + + if (has_msix_cap(vf_vdev)) { + vdev_pt_map_msix(vf_vdev, false); + } } } @@ -170,7 +195,7 @@ static void enable_vf(struct pci_vdev *pf_vdev) /* if one VF has never been created then create new pdev/vdev for this VF */ if (pci_find_vdev(&pf_vdev->vpci->vm->vpci, vf_bdf) == NULL) { - create_vf(pf_vdev, vf_bdf); + create_vf(pf_vdev, vf_bdf, idx); } } } else { @@ -270,12 +295,3 @@ uint32_t sriov_bar_offset(const struct pci_vdev *vdev, uint32_t bar_idx) { return (vdev->sriov.capoff + PCIR_SRIOV_VF_BAR_OFF + (bar_idx << 2U)); } - -/** - * @pre vdev != NULL - */ -void init_sriov_vf_vdev(struct pci_vdev *vdev) -{ - /* Implementation in next path */ - (void)vdev; -}