hv: vpci: restore physical BARs when writing Command Register if necessary

When a PCIe device goes through a Conventional Reset or FLR, most of its
configuration and state is lost, so the configuration must be saved before the
reset and restored afterward. The BIOS or the guest already does this today.
However, ACRN traps these configuration accesses and handles them itself for
security. Most of the restored values eventually reach the physical
configuration space, except for the BAR values. So ACRN has to restore the BAR
values itself. One option is to do the restore whenever a reset is detected,
but that is too complex. Another option is to do the restore when the BIOS or
guest writes the Command Register. This works because:
1. The I/O Space Enable and Memory Space Enable bits in the Command Register
reset to zero.
2. Until the BIOS or guest re-enables these bits, the BARs cannot be accessed.
3. So, if a reset is detected, the BAR values can be restored before these
bits are enabled, as sketched below.
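
A minimal sketch of that check (illustrative only, not part of this patch:
restore_bars_if_needed() is a hypothetical wrapper name, while
pci_pdev_read_cfg(), pdev_need_bar_restore() and pdev_restore_bar() are the
ACRN helpers used or added in this change):

/* Sketch: restore the saved BARs before the guest re-enables decoding.
 * new_cmd is the value the BIOS/guest is about to write to the Command Register.
 */
static void restore_bars_if_needed(const struct pci_pdev *pdev, uint16_t new_cmd)
{
	uint16_t phys_cmd = (uint16_t)pci_pdev_read_cfg(pdev->bdf, PCIR_COMMAND, 2U);
	uint16_t enable_mask = PCIM_CMD_PORTEN | PCIM_CMD_MEMEN;

	/* Decoding flips from disabled to enabled and the physical BARs no longer
	 * match the values saved at init: a reset must have happened, so restore.
	 */
	if (((phys_cmd & enable_mask) == 0U) && ((new_cmd & enable_mask) != 0U) &&
			pdev_need_bar_restore(pdev)) {
		pdev_restore_bar(pdev);
	}
}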

Tracked-On: #3475
Signed-off-by: Li Fei1 <fei1.li@intel.com>
Li Fei1 2019-12-26 00:35:34 +08:00 committed by wenlingz
parent 742abaf2e6
commit 6c549d48a8
5 changed files with 66 additions and 0 deletions

@@ -307,3 +307,23 @@ void init_vdev_pt(struct pci_vdev *vdev)
		pci_pdev_write_cfg(vdev->pdev->bdf, PCIR_COMMAND, 2U, pci_command);
	}
}

/*
 * @pre vdev != NULL && vdev->pdev != NULL && vdev->pdev->hdr_type == PCIM_HDRTYPE_NORMAL
 */
void vdev_pt_write_command(const struct pci_vdev *vdev, uint32_t bytes, uint16_t new_cmd)
{
	union pci_bdf bdf = vdev->pdev->bdf;
	uint16_t phys_cmd = (uint16_t)pci_pdev_read_cfg(bdf, PCIR_COMMAND, 2U);
	uint16_t enable_mask = PCIM_CMD_PORTEN | PCIM_CMD_MEMEN;

	/* WARN: don't support Type 1 device BAR restore for now */
	if (vdev->pdev->hdr_type == PCIM_HDRTYPE_NORMAL) {
		if (((phys_cmd & enable_mask) == 0U) && ((new_cmd & enable_mask) != 0U) &&
				pdev_need_bar_restore(vdev->pdev)) {
			pdev_restore_bar(vdev->pdev);
		}
	}

	pci_pdev_write_cfg(bdf, PCIR_COMMAND, bytes, new_cmd);
}

@@ -342,6 +342,8 @@ static int32_t vpci_write_pt_dev_cfg(struct pci_vdev *vdev, uint32_t offset,
			((vdev->af_capoff + PCIR_AF_CTRL) == offset) && ((val & PCIM_AF_FLR) != 0U))) {
		/* Assume that guest write FLR must be 4 bytes aligned */
		pdev_do_flr(vdev->pdev->bdf, offset, bytes, val);
	} else if (offset == PCIR_COMMAND) {
		vdev_pt_write_command(vdev, (bytes > 2U) ? 2U : bytes, (uint16_t)val);
	} else {
		/* passthru to physical device */
		pci_pdev_write_cfg(vdev->pdev->bdf, offset, bytes, val);

@@ -127,6 +127,7 @@ static inline bool msicap_access(const struct pci_vdev *vdev, uint32_t offset)
void init_vdev_pt(struct pci_vdev *vdev);
void vdev_pt_write_vbar(struct pci_vdev *vdev, uint32_t idx, uint32_t val);
void vdev_pt_write_command(const struct pci_vdev *vdev, uint32_t bytes, uint16_t new_cmd);

void init_vmsi(struct pci_vdev *vdev);
void vmsi_read_cfg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val);

@@ -151,6 +151,40 @@ void pci_pdev_write_cfg(union pci_bdf bdf, uint32_t offset, uint32_t bytes, uint
	spinlock_release(&pci_device_lock);
}

bool pdev_need_bar_restore(const struct pci_pdev *pdev)
{
	bool need_restore = false;
	uint32_t idx, bar;

	for (idx = 0U; idx < PCI_STD_NUM_BARS; idx++) {
		bar = pci_pdev_read_cfg(pdev->bdf, pci_bar_offset(idx), 4U);
		if (bar != pdev->bars[idx]) {
			need_restore = true;
			break;
		}
	}

	return need_restore;
}

static inline void pdev_save_bar(struct pci_pdev *pdev)
{
	uint32_t idx;

	for (idx = 0U; idx < PCI_STD_NUM_BARS; idx++) {
		pdev->bars[idx] = pci_pdev_read_cfg(pdev->bdf, pci_bar_offset(idx), 4U);
	}
}

void pdev_restore_bar(const struct pci_pdev *pdev)
{
	uint32_t idx;

	for (idx = 0U; idx < PCI_STD_NUM_BARS; idx++) {
		pci_pdev_write_cfg(pdev->bdf, pci_bar_offset(idx), 4U, pdev->bars[idx]);
	}
}

/* enable: 1: enable INTx; 0: Disable INTx */
void enable_disable_pci_intx(union pci_bdf bdf, bool enable)
{
@@ -478,7 +512,11 @@ static void init_pdev(uint16_t pbdf, uint32_t drhd_index)
	if ((hdr_type == PCIM_HDRTYPE_NORMAL) || (hdr_type == PCIM_HDRTYPE_BRIDGE)) {
		pdev = &pci_pdev_array[num_pci_pdev];
		pdev->bdf.value = pbdf;
		pdev->hdr_type = hdr_type;
		pdev->nr_bars = pci_pdev_get_nr_bars(hdr_type);
		if (hdr_type == PCIM_HDRTYPE_NORMAL) {
			pdev_save_bar(pdev);
		}

		if ((pci_pdev_read_cfg(bdf, PCIR_STATUS, 2U) & PCIM_STATUS_CAPPRESENT) != 0U) {
			pci_read_cap(pdev);

@@ -179,11 +179,14 @@ struct pci_msix_cap {
};

struct pci_pdev {
	uint8_t hdr_type;

	/* IOMMU responsible for DMA and Interrupt Remapping for this device */
	uint32_t drhd_index;

	/* The bar info of the physical PCI device. */
	uint32_t nr_bars; /* 6 for normal device, 2 for bridge, 1 for cardbus */
	uint32_t bars[PCI_STD_NUM_BARS];

	/* The bus/device/function triple of the physical PCI device. */
	union pci_bdf bdf;

@@ -314,5 +317,7 @@ static inline bool is_pci_cfg_bridge(uint8_t header_type)
}

void pdev_do_flr(union pci_bdf bdf, uint32_t offset, uint32_t bytes, uint32_t val);

bool pdev_need_bar_restore(const struct pci_pdev *pdev);
void pdev_restore_bar(const struct pci_pdev *pdev);

#endif /* PCI_H_ */