HV: call get_vbar_base() to get the newly set vbar base address in 64-bit

Replace new_base with vbar_base in vdev_pt_remap_generic_mem_vbar().
We will call vdev_pt_remap_generic_mem_vbar() after a new vbar base
is set, no need to pass new_base to vdev_pt_remap_generic_mem_vbar(),
as this new vbar base (vbar_base) can be obtained by calling get_vbar_base().

The reason we call vdev_pt_remap_generic_mem_vbar() after a new vbar base
is set is for 64-bit MMIO handling: when the lower 32 bits of a 64-bit MMIO
vbar are set, we defer calling vdev_pt_remap_generic_mem_vbar() until the
upper 32 bits of the vbar base are also set.

Tracked-On: #3241
Signed-off-by: dongshen <dongsheng.x.zhang@intel.com>
Reviewed-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
dongshen 2019-06-27 17:18:35 -07:00 committed by ACRN System Integration
parent ed1bdcbbdf
commit f0244b24e7

View File

@@ -262,30 +262,32 @@ void vdev_pt_remap_msix_table_bar(struct pci_vdev *vdev)
  * @pre vdev->vpci != NULL
  * @pre vdev->vpci->vm != NULL
  */
-static void vdev_pt_remap_generic_mem_vbar(struct pci_vdev *vdev, uint32_t idx, uint32_t new_base)
+static void vdev_pt_remap_generic_mem_vbar(struct pci_vdev *vdev, uint32_t idx)
 {
 	struct acrn_vm *vm = vdev->vpci->vm;
+	uint64_t vbar_base = get_vbar_base(vdev, idx); /* vbar (gpa) */
 
 	/* If the old vbar is mapped before, unmap it first */
 	if (vdev->bar_base_mapped[idx] != 0UL) {
-		ept_del_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+		ept_del_mr(vm, (uint64_t *)(vm->arch_vm.nworld_eptp),
 			vdev->bar_base_mapped[idx], /* GPA (old vbar) */
 			vdev->bar[idx].size);
 		vdev->bar_base_mapped[idx] = 0UL;
 	}
 
-	if (new_base != 0U) {
+	/* If a new vbar is set (nonzero), set the EPT mapping accordingly */
+	if (vbar_base != 0UL) {
 		uint64_t pbar_base = get_pbar_base(vdev->pdev, idx); /* pbar (hpa) */
 
 		/* Map the physical BAR in the guest MMIO space */
-		ept_add_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+		ept_add_mr(vm, (uint64_t *)(vm->arch_vm.nworld_eptp),
 			pbar_base, /* HPA (pbar) */
-			new_base, /*GPA*/
+			vbar_base, /* GPA (new vbar) */
 			vdev->bar[idx].size,
 			EPT_WR | EPT_RD | EPT_UNCACHED);
 
 		/* Remember the previously mapped MMIO vbar */
-		vdev->bar_base_mapped[idx] = (uint64_t)new_base;
+		vdev->bar_base_mapped[idx] = vbar_base;
 	}
 }
@@ -343,8 +345,7 @@ static void vdev_pt_write_vbar(struct pci_vdev *vdev, uint32_t offset, uint32_t
 		vdev->bar[idx].base = base;
 		vdev_pt_remap_msix_table_bar(vdev);
 	} else {
-		vdev_pt_remap_generic_mem_vbar(vdev, idx, base);
+		vdev_pt_remap_generic_mem_vbar(vdev, idx);
 		vdev->bar[idx].base = base;
 	}
 }