Mirror of https://github.com/projectacrn/acrn-hypervisor.git
hv: implement SRIOV VF_BAR initialization
SRIOV VF physical devices have no BARs in their own configuration space; their BARs come from the VF_BAR registers in the SRIOV capability of the associated PF. Add a vbars array to the pci_cap_sriov structure to store the SRIOV VF_BAR information, so that each VF's BARs can be initialized directly from the cached vbars instead of accessing the PF's VF_BAR registers multiple times.

Tracked-On: #4433
Signed-off-by: Yuan Liu <yuan1.liu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
parent 298ef2f5c4
commit abbdef4f5d
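For background on where VF BAR values come from, the sketch below shows the per-VF address derivation defined by the SR-IOV spec: VF number n owns the n-th size-aligned slice of the aperture described by the PF's VF_BARx register. This is a minimal illustration only; the helper and structure names are assumptions, not code from this patch.

#include <stdint.h>

/* Illustrative description of one VF_BARx aperture of the PF (assumed names). */
struct vf_bar_info {
	uint64_t aperture_base; /* base programmed into the PF's VF_BARx register */
	uint64_t vf_size;       /* per-VF size obtained by sizing the VF_BARx register */
};

/* Base address of BARx for VF number vf_index (0-based). */
static uint64_t vf_bar_base(const struct vf_bar_info *info, uint16_t vf_index)
{
	return info->aperture_base + ((uint64_t)vf_index * info->vf_size);
}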
@@ -215,26 +215,46 @@ void vdev_pt_write_vbar(struct pci_vdev *vdev, uint32_t idx, uint32_t val)
  * Hypervisor traps guest changes to the mmio vbar (gpa) to establish ept mapping
  * between vbar(gpa) and pbar(hpa). pbar should always align on 4K boundary.
  *
+ * @param vdev Pointer to a vdev structure
+ * @param is_sriov_bar When vdev is a SRIOV PF vdev, init_bars initializes the
+ *                     normal PCIe BARs of the PF vdev if is_sriov_bar is false,
+ *                     and initializes the SRIOV VF BARs of the PF vdev if
+ *                     is_sriov_bar is true. If vdev is not a SRIOV PF vdev,
+ *                     is_sriov_bar should be false.
+ *
  * @pre vdev != NULL
  * @pre vdev->vpci != NULL
  * @pre vdev->vpci->vm != NULL
  * @pre vdev->pdev != NULL
+ *
+ * @return None
  */
-static void init_bars(struct pci_vdev *vdev)
+static void init_bars(struct pci_vdev *vdev, bool is_sriov_bar)
 {
 	enum pci_bar_type type;
-	uint32_t idx;
+	uint32_t idx, bar_cnt;
 	struct pci_vbar *vbar;
 	uint32_t size32, offset, lo, hi = 0U;
 	union pci_bdf pbdf;
 	uint64_t mask;
 
-	vdev->nr_bars = vdev->pdev->nr_bars;
+	if (is_sriov_bar) {
+		bar_cnt = PCI_BAR_COUNT;
+	} else {
+		vdev->nr_bars = vdev->pdev->nr_bars;
+		bar_cnt = vdev->nr_bars;
+	}
 	pbdf.value = vdev->pdev->bdf.value;
 
-	for (idx = 0U; idx < vdev->nr_bars; idx++) {
-		vbar = &vdev->vbars[idx];
-		offset = pci_bar_offset(idx);
+	for (idx = 0U; idx < bar_cnt; idx++) {
+		if (is_sriov_bar) {
+			vbar = &vdev->sriov.vbars[idx];
+			offset = sriov_bar_offset(vdev, idx);
+		} else {
+			vbar = &vdev->vbars[idx];
+			offset = pci_bar_offset(idx);
+		}
 		lo = pci_pdev_read_cfg(pbdf, offset, 4U);
 
 		type = pci_get_bar_type(lo);
@@ -265,7 +285,11 @@ static void init_bars(struct pci_vdev *vdev)
 
 			if (type == PCIBAR_MEM64) {
 				idx++;
-				offset = pci_bar_offset(idx);
+				if (is_sriov_bar) {
+					offset = sriov_bar_offset(vdev, idx);
+				} else {
+					offset = pci_bar_offset(idx);
+				}
 				pci_pdev_write_cfg(pbdf, offset, 4U, ~0U);
 				size32 = pci_pdev_read_cfg(pbdf, offset, 4U);
 				pci_pdev_write_cfg(pbdf, offset, 4U, hi);
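The write-0xFFFFFFFF / read-back / restore sequence above is the standard PCI BAR sizing handshake, applied here to the high dword of a 64-bit BAR. A minimal standalone sketch of the general 32-bit case follows; the config-space accessors are assumed names for illustration, not ACRN code.

#include <stdint.h>

/* Assumed config-space accessors for illustration only. */
extern uint32_t pci_cfg_read32(uint32_t offset);
extern void pci_cfg_write32(uint32_t offset, uint32_t val);

/* Size a 32-bit memory BAR at 'offset': write all 1s, read back the
 * writable-bit mask, restore the original value, derive the size. */
static uint64_t size_mem_bar32(uint32_t offset)
{
	uint32_t orig = pci_cfg_read32(offset);
	uint32_t mask;

	pci_cfg_write32(offset, ~0U);
	mask = pci_cfg_read32(offset) & 0xfffffff0U; /* strip the low flag bits */
	pci_cfg_write32(offset, orig);               /* restore the original BAR */

	/* the lowest writable address bit gives the size; 0 means not implemented */
	return (mask == 0U) ? 0UL : (uint64_t)(~mask + 1U);
}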
@@ -274,25 +298,36 @@ static void init_bars(struct pci_vdev *vdev)
 				vbar->size = vbar->size & ~(vbar->size - 1UL);
 				vbar->size = round_page_up(vbar->size);
 
-				vbar = &vdev->vbars[idx];
+				if (is_sriov_bar) {
+					vbar = &vdev->sriov.vbars[idx];
+				} else {
+					vbar = &vdev->vbars[idx];
+				}
 
 				vbar->mask = size32;
 				vbar->type = PCIBAR_MEM64HI;
 
 				if (is_prelaunched_vm(vdev->vpci->vm)) {
 					hi = (uint32_t)(vdev->pci_dev_config->vbar_base[idx - 1U] >> 32U);
 				}
-				pci_vdev_write_bar(vdev, idx - 1U, lo);
-				pci_vdev_write_bar(vdev, idx, hi);
+				/* if it is parsing SRIOV VF BARs, no need to write vdev bars */
+				if (!is_sriov_bar) {
+					pci_vdev_write_bar(vdev, idx - 1U, lo);
+					pci_vdev_write_bar(vdev, idx, hi);
+				}
 			} else {
 				vbar->size = vbar->size & ~(vbar->size - 1UL);
 				if (type == PCIBAR_MEM32) {
 					vbar->size = round_page_up(vbar->size);
 				}
-				pci_vdev_write_bar(vdev, idx, lo);
+				/* if it is parsing SRIOV VF BARs, no need to write vdev bar */
+				if (!is_sriov_bar) {
+					pci_vdev_write_bar(vdev, idx, lo);
+				}
 			}
 		}
 	}
 }
 
 /*
  * @brief Initialize a specified passthrough vdev structure.
@@ -316,11 +351,8 @@ void init_vdev_pt(struct pci_vdev *vdev, bool is_pf_vdev)
 {
 	uint16_t pci_command;
 
-	/* SRIOV capability initialization implementaion in next patch */
-	(void) is_pf_vdev;
-
-	init_bars(vdev);
-
-	if (is_prelaunched_vm(vdev->vpci->vm)) {
+	init_bars(vdev, is_pf_vdev);
+	if (is_prelaunched_vm(vdev->vpci->vm) && (!is_pf_vdev)) {
 		pci_command = (uint16_t)pci_pdev_read_cfg(vdev->pdev->bdf, PCIR_COMMAND, 2U);
 
 		/* Disable INTX */
@@ -159,6 +159,7 @@ void deinit_vmsix(const struct pci_vdev *vdev);
 void init_vsriov(struct pci_vdev *vdev);
 void read_sriov_cap_reg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val);
 void write_sriov_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
+uint32_t sriov_bar_offset(const struct pci_vdev *vdev, uint32_t bar_idx);
 
 uint32_t pci_vdev_read_cfg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes);
 void pci_vdev_write_cfg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
@@ -74,8 +74,7 @@ static bool is_vf_enabled(const struct pci_vdev *pf_vdev)
  */
 static void init_sriov_vf_bar(struct pci_vdev *pf_vdev)
 {
-	/* Implementation in next patch */
-	(void)pf_vdev;
+	init_vdev_pt(pf_vdev, true);
 }
 
 /**
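Together with the init_vdev_pt() hunk above, this gives the PF-side caching path introduced by the patch; a condensed view of the call chain (comments only, for orientation):

/*
 * init_sriov_vf_bar(pf_vdev)
 *   -> init_vdev_pt(pf_vdev, true)      - is_pf_vdev == true
 *        -> init_bars(pf_vdev, true)    - is_sriov_bar == true: sizes the PF's
 *             SRIOV VF_BARs via sriov_bar_offset() and caches the results in
 *             pf_vdev->sriov.vbars[], skipping the pci_vdev_write_bar() calls.
 */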
@@ -234,3 +233,12 @@ void write_sriov_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val)
 		pci_pdev_write_cfg(vdev->pdev->bdf, offset, bytes, val);
 	}
 }
+
+/**
+ * @pre vdev != NULL
+ */
+uint32_t sriov_bar_offset(const struct pci_vdev *vdev, uint32_t bar_idx)
+{
+	return (vdev->sriov.capoff + PCIR_SRIOV_VF_BAR_OFF + (bar_idx << 2U));
+}
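As a quick check of the formula: in the SR-IOV extended capability, VF BAR0 sits at offset 0x24 from the capability header (hence PCIR_SRIOV_VF_BAR_OFF below), and each VF_BAR register is 4 bytes wide. With a hypothetical capoff of 0x160 and bar_idx 2, the function returns 0x160 + 0x24 + (2 << 2) = 0x18C, the config-space offset of the PF's VF_BAR2 register.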
@@ -252,7 +252,7 @@ static bool is_hv_owned_pdev(union pci_bdf pbdf)
 static void pci_init_pdev(union pci_bdf pbdf, uint32_t drhd_index)
 {
 	if (!is_hv_owned_pdev(pbdf)) {
-		init_pdev(pbdf.value, drhd_index);
+		(void)init_pdev(pbdf.value, drhd_index);
 	}
 }
 
@@ -469,7 +469,7 @@ static void init_all_dev_config(void)
 				total += cnt;
 			}
 		}
-		init_one_dev_config(pdev);
+		(void)init_one_dev_config(pdev);
 	}
 }
 
@@ -73,6 +73,12 @@ struct pci_msix {
 struct pci_cap_sriov {
 	uint32_t capoff;
 	uint32_t caplen;
+
+	/*
+	 * If the vdev is a SRIOV PF vdev, the vbars array stores the BAR
+	 * information that is used to initialize the SRIOV VF vdev BARs.
+	 */
+	struct pci_vbar vbars[PCI_BAR_COUNT];
 };
 
 union pci_cfgdata {
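The cache is dimensioned PCI_BAR_COUNT because the SR-IOV capability defines six VF BARs (VF_BAR0 through VF_BAR5), mirroring the six BARs of a type-0 configuration header, so a per-PF array of that size can hold every VF_BAR the capability may implement.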
@@ -120,6 +120,7 @@
 #define PCIR_SRIOV_NUMVFS          0x10U
 #define PCIR_SRIOV_FST_VF_OFF      0x14U
 #define PCIR_SRIOV_VF_STRIDE       0x16U
+#define PCIR_SRIOV_VF_BAR_OFF      0x24U
 #define PCIM_SRIOV_VF_ENABLE       0x1U
 
 /* PCI Message Signalled Interrupts (MSI) */