hv: vpci: add a global CFG header configuration access handler

Add read_cfg_header and write_cfg_header to handle accesses to the first 64
bytes of PCI configuration space, i.e. the CFG space header.
Only the Command and Status registers are passed through;
only the Command and Status registers and the Base Address Registers are writable.
To implement this, we add two bit masks, with one bit per 4-byte register:
a pass-through mask and a read-only mask. When a register's pass-through bit
is set, that 4-byte register is passed through to the physical device;
otherwise it is virtualized. When its read-only bit is set, the register is
read-only; otherwise it is writable, and a write goes to the physical CFG
space or the virtual CFG space depending on whether the pass-through bit is
set (sketched below).

Tracked-On: #4371
Signed-off-by: Li Fei1 <fei1.li@intel.com>
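
As an illustration of the pass-through/read-only scheme (a minimal standalone
sketch, not the hypervisor code itself; hdr_perm, hdr_bit and route_hdr_write
are hypothetical names):

#include <stdbool.h>
#include <stdint.h>

/* One bit per 4-byte register of the 64-byte header, bits 0..15.
 * pt_mask bit set -> register is passed through to the physical device;
 * ro_mask bit set -> register is read-only for the guest. */
struct hdr_perm {
	uint32_t pt_mask;
	uint32_t ro_mask;
};

enum hdr_route { HDR_DROP, HDR_PHYS, HDR_VIRT };

static inline bool hdr_bit(uint32_t mask, uint32_t offset)
{
	return ((mask >> (offset >> 2U)) & 1U) != 0U;
}

/* Routing decision for a guest write to the header register at `offset`. */
static enum hdr_route route_hdr_write(const struct hdr_perm *p, uint32_t offset)
{
	if (hdr_bit(p->ro_mask, offset)) {
		return HDR_DROP;	/* read-only: discard the write */
	}
	return hdr_bit(p->pt_mask, offset) ? HDR_PHYS : HDR_VIRT;
}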

@@ -247,6 +247,10 @@ static void init_bars(struct pci_vdev *vdev, bool is_sriov_bar)
}
pbdf.value = vdev->pdev->bdf.value;
for (offset = 0U; offset < PCI_CFG_HEADER_LENGTH; offset += 4U) {
pci_vdev_write_cfg(vdev, offset, 4U, pci_pdev_read_cfg(pbdf, offset, 4U));
}
for (idx = 0U; idx < bar_cnt; idx++) {
if (is_sriov_bar) {
vbar = &vdev->sriov.vbars[idx];
@@ -364,23 +368,3 @@ void init_vdev_pt(struct pci_vdev *vdev, bool is_pf_vdev)
}
}
}
/*
* @pre vdev != NULL && vdev->pdev != NULL && vdev->pdev->hdr_type == PCIM_HDRTYPE_NORMAL
*/
void vdev_pt_write_command(const struct pci_vdev *vdev, uint32_t bytes, uint16_t new_cmd)
{
union pci_bdf bdf = vdev->pdev->bdf;
uint16_t phys_cmd = (uint16_t)pci_pdev_read_cfg(bdf, PCIR_COMMAND, 2U);
uint16_t enable_mask = PCIM_CMD_PORTEN | PCIM_CMD_MEMEN;
/* WARN: don't support Type 1 device BAR restore for now */
if (vdev->pdev->hdr_type == PCIM_HDRTYPE_NORMAL) {
if (((phys_cmd & enable_mask) == 0U) && ((new_cmd & enable_mask) != 0U) &&
pdev_need_bar_restore(vdev->pdev)) {
pdev_restore_bar(vdev->pdev);
}
}
pci_pdev_write_cfg(bdf, PCIR_COMMAND, bytes, new_cmd);
}
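
The restore trigger used here (and retained by write_cfg_header below) is an
off-to-on transition of the decode-enable bits in the Command register; a
minimal standalone sketch, assuming the standard PCI command-register layout
for the PCIM_* values:

#include <stdbool.h>
#include <stdint.h>

#define PCIM_CMD_PORTEN	0x0001U	/* bit 0: I/O space decode enable */
#define PCIM_CMD_MEMEN	0x0002U	/* bit 1: memory space decode enable */

/* BARs only need restoring when decoding flips from fully off to on,
 * the signature of a device that was reset behind the hypervisor's back. */
static bool decode_turning_on(uint16_t phys_cmd, uint16_t new_cmd)
{
	uint16_t enable_mask = PCIM_CMD_PORTEN | PCIM_CMD_MEMEN;

	return ((phys_cmd & enable_mask) == 0U) && ((new_cmd & enable_mask) != 0U);
}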


@@ -370,37 +370,38 @@ static void vpci_deinit_pt_dev(struct pci_vdev *vdev)
deinit_vmsi(vdev);
}
static int32_t write_pt_dev_cfg(struct pci_vdev *vdev, uint32_t offset,
uint32_t bytes, uint32_t val)
{
if (vbar_access(vdev, offset)) {
/* bar write access must be 4 bytes and offset must also be 4 bytes aligned */
if ((bytes == 4U) && ((offset & 0x3U) == 0U)) {
vdev_pt_write_vbar(vdev, pci_bar_index(offset), val);
}
} else if (msicap_access(vdev, offset)) {
vmsi_write_cfg(vdev, offset, bytes, val);
} else if (msixcap_access(vdev, offset)) {
vmsix_write_cfg(vdev, offset, bytes, val);
} else if (sriovcap_access(vdev, offset)) {
write_sriov_cap_reg(vdev, offset, bytes, val);
} else if (offset == PCIR_COMMAND) {
vdev_pt_write_command(vdev, (bytes > 2U) ? 2U : bytes, (uint16_t)val);
} else {
if (is_postlaunched_vm(vdev->vpci->vm) &&
in_range(offset, PCIR_INTERRUPT_LINE, 4U)) {
pci_vdev_write_cfg(vdev, offset, bytes, val);
} else {
/* passthru to physical device */
pci_pdev_write_cfg(vdev->pdev->bdf, offset, bytes, val);
}
}
struct cfg_header_perm {
/* For each 4-byte register defined in the PCI config space header,
 * there is one bit dedicated to it in pt_mask and ro_mask.
 * For example, bit 0 for the Vendor ID and Device ID register,
 * bit 1 for the Command and Status register, and so on.
 *
 * In each mask, only the low 16 bits take effect.
 *
 * If bit x is set in pt_mask, the corresponding 4-byte register
 * is passed through to the guest. Otherwise, it is virtualized.
 *
 * If bit x is set in ro_mask, the corresponding 4-byte register
 * is read-only. Otherwise, it is writable.
 */
uint32_t pt_mask;
uint32_t ro_mask;
};
return 0;
}
static const struct cfg_header_perm cfg_hdr_perm = {
/* Only Command (0x04-0x05) and Status (0x06-0x07) Registers are passed through */
.pt_mask = 0x0002U,
/* Command (0x04-0x05) and Status (0x06-0x07) Registers and
* Base Address Registers (0x10-0x27) are writable */
.ro_mask = (uint16_t)~0x03f2U
};
static int32_t read_pt_dev_cfg(const struct pci_vdev *vdev, uint32_t offset,
uint32_t bytes, uint32_t *val)
/*
* @pre offset + bytes <= PCI_CFG_HEADER_LENGTH
*/
static void read_cfg_header(const struct pci_vdev *vdev,
uint32_t offset, uint32_t bytes, uint32_t *val)
{
if (vbar_access(vdev, offset)) {
/* bar access must be 4 bytes and offset must also be 4 bytes aligned */
@@ -409,6 +410,72 @@ static int32_t read_pt_dev_cfg(const struct pci_vdev *vdev, uint32_t offset,
} else {
*val = ~0U;
}
} else {
if (bitmap32_test(((uint16_t)offset) >> 2U, &cfg_hdr_perm.pt_mask)) {
*val = pci_pdev_read_cfg(vdev->pdev->bdf, offset, bytes);
} else {
*val = pci_vdev_read_cfg(vdev, offset, bytes);
}
}
}
/*
* @pre offset + bytes <= PCI_CFG_HEADER_LENGTH
*/
static void write_cfg_header(struct pci_vdev *vdev,
uint32_t offset, uint32_t bytes, uint32_t val)
{
if (vbar_access(vdev, offset)) {
/* bar write access must be 4 bytes and offset must also be 4 bytes aligned */
if ((bytes == 4U) && ((offset & 0x3U) == 0U)) {
vdev_pt_write_vbar(vdev, pci_bar_index(offset), val);
}
} else {
if (offset == PCIR_COMMAND) {
#define PCIM_SPACE_EN (PCIM_CMD_PORTEN | PCIM_CMD_MEMEN)
uint16_t phys_cmd = (uint16_t)pci_pdev_read_cfg(vdev->pdev->bdf, PCIR_COMMAND, 2U);
/* check whether the BARs need to be restored after some kind of device reset */
if (((phys_cmd & PCIM_SPACE_EN) == 0U) && ((val & PCIM_SPACE_EN) != 0U) &&
pdev_need_bar_restore(vdev->pdev)) {
pdev_restore_bar(vdev->pdev);
}
}
if (!bitmap32_test(((uint16_t)offset) >> 2U, &cfg_hdr_perm.ro_mask)) {
if (bitmap32_test(((uint16_t)offset) >> 2U, &cfg_hdr_perm.pt_mask)) {
pci_pdev_write_cfg(vdev->pdev->bdf, offset, bytes, val);
} else {
pci_vdev_write_cfg(vdev, offset, bytes, val);
}
}
}
}
static int32_t write_pt_dev_cfg(struct pci_vdev *vdev, uint32_t offset,
uint32_t bytes, uint32_t val)
{
if (cfg_header_access(offset)) {
write_cfg_header(vdev, offset, bytes, val);
} else if (msicap_access(vdev, offset)) {
vmsi_write_cfg(vdev, offset, bytes, val);
} else if (msixcap_access(vdev, offset)) {
vmsix_write_cfg(vdev, offset, bytes, val);
} else if (sriovcap_access(vdev, offset)) {
write_sriov_cap_reg(vdev, offset, bytes, val);
} else {
/* passthru to physical device */
pci_pdev_write_cfg(vdev->pdev->bdf, offset, bytes, val);
}
return 0;
}
static int32_t read_pt_dev_cfg(const struct pci_vdev *vdev, uint32_t offset,
uint32_t bytes, uint32_t *val)
{
if (cfg_header_access(offset)) {
read_cfg_header(vdev, offset, bytes, val);
} else if (msicap_access(vdev, offset)) {
vmsi_read_cfg(vdev, offset, bytes, val);
} else if (msixcap_access(vdev, offset)) {
@@ -416,13 +483,8 @@ static int32_t read_pt_dev_cfg(const struct pci_vdev *vdev, uint32_t offset,
} else if (sriovcap_access(vdev, offset)) {
read_sriov_cap_reg(vdev, offset, bytes, val);
} else {
if (is_postlaunched_vm(vdev->vpci->vm) &&
in_range(offset, PCIR_INTERRUPT_LINE, 4U)) {
*val = pci_vdev_read_cfg(vdev, offset, bytes);
} else {
/* passthru to physical device */
*val = pci_pdev_read_cfg(vdev->pdev->bdf, offset, bytes);
}
/* passthru to physical device */
*val = pci_pdev_read_cfg(vdev->pdev->bdf, offset, bytes);
}
return 0;
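
As a cross-check of the cfg_hdr_perm values above, a small standalone sketch
that derives pt_mask and the writable mask from the offsets named in the
comments (HDR_BIT is a hypothetical helper, not part of the commit):

#include <assert.h>
#include <stdint.h>

/* Bit index for the 4-byte register at byte offset `off`. */
#define HDR_BIT(off)	(1U << ((off) >> 2U))

int main(void)
{
	/* Command and Status share the dword at offset 0x04 -> bit 1. */
	uint32_t pt_mask = HDR_BIT(0x04U);

	/* Writable: Command/Status plus the six BARs at 0x10..0x24. */
	uint32_t writable = HDR_BIT(0x04U);
	uint32_t off;

	for (off = 0x10U; off <= 0x24U; off += 4U) {
		writable |= HDR_BIT(off);
	}

	assert(pt_mask == 0x0002U);
	assert(writable == 0x03f2U);
	/* Only the low 16 bits matter: 16 dwords cover the 64-byte header. */
	assert((uint16_t)~writable == (uint16_t)~0x03f2U);
	return 0;
}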


@@ -101,6 +101,14 @@ static inline bool vbar_access(const struct pci_vdev *vdev, uint32_t offset)
return is_bar_offset(vdev->nr_bars, offset);
}
static inline bool cfg_header_access(uint32_t offset)
{
return (offset < PCI_CFG_HEADER_LENGTH);
}
/**
* @pre vdev != NULL
*/
@@ -119,7 +127,6 @@ static inline bool msicap_access(const struct pci_vdev *vdev, uint32_t offset)
void init_vdev_pt(struct pci_vdev *vdev, bool is_pf_vdev);
void vdev_pt_write_vbar(struct pci_vdev *vdev, uint32_t idx, uint32_t val);
void vdev_pt_write_command(const struct pci_vdev *vdev, uint32_t bytes, uint16_t new_cmd);
void init_vmsi(struct pci_vdev *vdev);
void vmsi_read_cfg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val);


@@ -43,6 +43,8 @@
* PCIZ_xxx: extended capability identification number
*/
#define PCI_CFG_HEADER_LENGTH 0x40U
/* some PCI bus constants */
#define PCI_BUSMAX 0xFFU
#define PCI_SLOTMAX 0x1FU