hv: mmio: refine MMIO access handler lock granularity

Currently only the PCI MSI-X table BAR access handler needs dynamic register/unregister;
the other handlers are never unregistered once registered. So we don't need to hold the
vm-level emul_mmio_lock while handling those MMIO accesses. Instead, such a handler can
use a finer-granularity lock internally to protect its shared resource, as sketched below.

This patch fixes the deadlock hit when OVMF tries to size a BAR: because OVMF uses ECAM
to access the PCI configuration space, the hypervisor first takes the vm-level
emul_mmio_lock and then calls vpci_handle_mmconfig_access. When that access sizes a BAR
which is also a MSI-X table BAR, it calls register_mmio_emulation_handler to register
the MSI-X table BAR MMIO access handler, which tries to take emul_mmio_lock again and
deadlocks.
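Condensed into a call flow (a sketch of the scenario described above, not
verbatim source):

/* Before this patch, all on one vCPU's MMIO emulation path:
 *
 *   hv_emulate_mmio()
 *     spinlock_obtain(&vcpu->vm->emul_mmio_lock);   // first acquisition
 *     vpci_handle_mmconfig_access()                 // OVMF ECAM config access
 *       ... BAR sizing reaches a MSI-X table BAR ...
 *       register_mmio_emulation_handler()           // needs emul_mmio_lock
 *                                                   // again -> deadlock
 *
 * After this patch, the mmconfig handler is registered with hold_lock ==
 * false, so hv_emulate_mmio drops emul_mmio_lock before calling it and the
 * nested registration can take the lock safely.
 */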

Tracked-On: #3475
Signed-off-by: Li Fei1 <fei1.li@intel.com>


@@ -459,6 +459,7 @@ static int32_t
 hv_emulate_mmio(struct acrn_vcpu *vcpu, struct io_request *io_req)
 {
 	int32_t status = -ENODEV;
+	bool hold_lock = true;
 	uint16_t idx;
 	uint64_t address, size, base, end;
 	struct mmio_request *mmio_req = &io_req->reqs.mmio;
@@ -484,6 +485,7 @@ hv_emulate_mmio(struct acrn_vcpu *vcpu, struct io_request *io_req)
 				continue;
 			} else {
 				if ((address >= base) && ((address + size) <= end)) {
+					hold_lock = mmio_handler->hold_lock;
 					read_write = mmio_handler->read_write;
 					handler_private_data = mmio_handler->handler_private_data;
 				} else {
@@ -496,7 +498,16 @@ hv_emulate_mmio(struct acrn_vcpu *vcpu, struct io_request *io_req)
 	}
 	if ((status == -ENODEV) && (read_write != NULL)) {
+		/* This mmio_handler will never be modified once registered, so we
+		 * don't need to hold the lock when handling the MMIO access.
+		 */
+		if (!hold_lock) {
+			spinlock_release(&vcpu->vm->emul_mmio_lock);
+		}
 		status = read_write(io_req, handler_private_data);
+		if (!hold_lock) {
+			spinlock_obtain(&vcpu->vm->emul_mmio_lock);
+		}
 	}
 	spinlock_release(&vcpu->vm->emul_mmio_lock);
@@ -669,7 +680,7 @@ static inline struct mem_io_node *find_free_mmio_node(struct acrn_vm *vm)
  */
 void register_mmio_emulation_handler(struct acrn_vm *vm,
 		hv_mem_io_handler_t read_write, uint64_t start,
-		uint64_t end, void *handler_private_data)
+		uint64_t end, void *handler_private_data, bool hold_lock)
 {
 	struct mem_io_node *mmio_node;
@@ -679,6 +690,7 @@ void register_mmio_emulation_handler(struct acrn_vm *vm,
 	mmio_node = find_free_mmio_node(vm);
 	if (mmio_node != NULL) {
 		/* Fill in information for this node */
+		mmio_node->hold_lock = hold_lock;
 		mmio_node->read_write = read_write;
 		mmio_node->handler_private_data = handler_private_data;
 		mmio_node->range_start = start;


@@ -472,7 +472,7 @@ vioapic_init(struct acrn_vm *vm)
 			vioapic_mmio_access_handler,
 			(uint64_t)VIOAPIC_BASE,
 			(uint64_t)VIOAPIC_BASE + VIOAPIC_SIZE,
-			vm);
+			vm, false);
 	ept_del_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
 			(uint64_t)VIOAPIC_BASE, (uint64_t)VIOAPIC_SIZE);
 	vm->arch_vm.vioapic.ready = true;


@@ -112,7 +112,7 @@ static void vdev_pt_map_mem_vbar(struct pci_vdev *vdev, uint32_t idx)
 		addr_lo = round_page_down(addr_lo);
 		addr_hi = round_page_up(addr_hi);
 		register_mmio_emulation_handler(vm, vmsix_handle_table_mmio_access,
-			addr_lo, addr_hi, vdev);
+			addr_lo, addr_hi, vdev, true);
 		ept_del_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, addr_lo, addr_hi - addr_lo);
 		msix->mmio_gpa = vbar->base;
 	}


@@ -249,7 +249,7 @@ void vpci_init(struct acrn_vm *vm)
 		pci_mmcfg_base = (vm_config->load_order == SOS_VM) ? get_mmcfg_base() : 0xE0000000UL;
 		vm->vpci.pci_mmcfg_base = pci_mmcfg_base;
 		register_mmio_emulation_handler(vm, vpci_handle_mmconfig_access,
-			pci_mmcfg_base, pci_mmcfg_base + PCI_MMCONFIG_SIZE, &vm->vpci);
+			pci_mmcfg_base, pci_mmcfg_base + PCI_MMCONFIG_SIZE, &vm->vpci, false);
 	}
 	/* Intercept and handle I/O ports CF8h */