hv: resolve the negative impacts on UOS MSI/MSI-X remapping

After enabling vPCI in the hypervisor for VM0, UOS may fail to launch
successfully. Consider this scenario (taking MSI-X as an example):

- The DM makes a hypercall to the hypervisor to do MSI-X remapping on
  behalf of UOS guests.
- After the hypercall, the VHM module in the SOS kernel updates the
  physical MSI-X table with the physical Message Data/Addr.
- These MMIO write requests are intercepted by the hypervisor, which
  calls ptdev_msix_remap() to do MSI-X remapping.

The remapping may fail for two possible reasons:

1) Wrong target VM:
   the hypervisor treats it as a VM0 MSI-X device, but the device has
   already been registered to a UOS guest through the
   HC_SET_PTDEV_INTR_INFO hypercall.

2) Wrong ptdev_msi_info->vmsi_data:
   the virtual MSI-X table is supposed to hold the virtual Message
   Data/Addr, but the SOS VHM writes the physical ones into it (see the
   sketch after this list).
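
A minimal sketch of what the hypervisor ends up doing on the intercepted
table write before this patch. The fragment and its local names (vdev,
index, addr, data) are illustrative; struct ptdev_msi_info and
ptdev_msix_remap() are taken from the hypervisor code shown below.

  struct ptdev_msi_info info;

  info.is_msix = 1;
  /* (2) addr/data come from the virtual MSI-X table, which the SOS VHM
   *     has already filled with the *physical* Message Addr/Data */
  info.vmsi_addr = addr;
  info.vmsi_data = data;

  /* (1) vdev->vpci->vm still points to VM0, although the device has been
   *     registered to a UOS guest via HC_SET_PTDEV_INTR_INFO, so the
   *     remapping targets the wrong VM */
  (void)ptdev_msix_remap(vdev->vpci->vm, vdev->vbdf.value,
                         (uint16_t)index, &info);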

This patch resolves these problems by ignoring the HC_VM_PCI_MSIX_REMAP
hypercall, so the virtual and physical Message Data are the same from
the SOS' perspective and the hypercall no longer disturbs the virtual
PCI device state in the HV.

Also, in the HC_SET_PTDEV_INTR_INFO handler, vPCI updates the target VM
when PCI devices are assigned to different VMs.

The UOS' MSI/MSI-X remapping is then triggered by the hypervisor when
the SOS (either DM or VHM) updates the Message Data/Addr, roughly as
sketched below.
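
A minimal sketch of the new trigger path, assuming this patch is applied.
The handler name, its parameters and write_physical_msix_entry() are
hypothetical; ptdev_msix_remap(), struct ptdev_msi_info and the pci_vdev
fields are from the hypervisor sources.

  /*
   * Called when a write to a virtual MSI-X table entry's Message
   * Data/Addr is trapped. Since HC_VM_PCI_MSIX_REMAP is now a no-op,
   * the values written by the SOS are the virtual ones.
   */
  static void vmsix_entry_write_sketch(struct pci_vdev *vdev, uint32_t index,
                                       uint64_t vaddr, uint32_t vdata)
  {
          struct ptdev_msi_info info;

          info.is_msix = 1;
          info.vmsi_addr = vaddr; /* virtual Message Addr written by the SOS */
          info.vmsi_data = vdata; /* virtual Message Data written by the SOS */

          /* vdev->vpci->vm is the UOS guest after vpci_set_ptdev_intr_info() */
          if (ptdev_msix_remap(vdev->vpci->vm, vdev->vbdf.value,
                               (uint16_t)index, &info) == 0) {
                  /* hypothetical helper: program the physical MSI-X table
                   * entry with the remapped pmsi_addr/pmsi_data */
                  write_physical_msix_entry(vdev, index,
                                            info.pmsi_addr, info.pmsi_data);
          }
  }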

Tracked-On: #1568
Signed-off-by: dongshen <dongsheng.x.zhang@intel.com>
Signed-off-by: Zide Chen <zide.chen@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
Zide Chen 2018-10-24 14:24:53 -07:00 committed by Xie, Nanlin
parent c1d2499e5c
commit 0255e62798
5 changed files with 57 additions and 60 deletions


@@ -119,10 +119,12 @@ int vmcall_vmexit_handler(struct vcpu *vcpu)
ret = hcall_write_protect_page(vm, (uint16_t)param1, param2);
break;
/*
* Don't do MSI remapping and make the pmsi_data equal to vmsi_data
* This is a temporary solution before this hypercall is removed from SOS
*/
case HC_VM_PCI_MSIX_REMAP:
/* param1: vmid */
ret = hcall_remap_pci_msix(vm, (uint16_t)param1, param2);
ret = 0;
break;
case HC_VM_GPA2HPA:


@@ -556,47 +556,6 @@ int32_t hcall_write_protect_page(struct vm *vm, uint16_t vmid, uint64_t wp_gpa)
return write_protect_page(target_vm, &wp);
}
/**
*@pre Pointer vm shall point to VM0
*/
int32_t hcall_remap_pci_msix(struct vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret;
struct acrn_vm_pci_msix_remap remap;
struct ptdev_msi_info info;
struct vm *target_vm = get_vm_from_vmid(vmid);
if (target_vm == NULL) {
return -1;
}
(void)memset((void *)&remap, 0U, sizeof(remap));
if (copy_from_gpa(vm, &remap, param, sizeof(remap)) != 0) {
pr_err("%s: Unable copy param to vm\n", __func__);
return -1;
}
info.is_msix = remap.msix;
info.vmsi_ctl = remap.msi_ctl;
info.vmsi_addr = remap.msi_addr;
info.vmsi_data = remap.msi_data;
if (remap.msix_entry_index >= MAX_MSI_ENTRY) {
return -1;
}
ret = ptdev_msix_remap(target_vm, remap.virt_bdf,
(uint16_t)remap.msix_entry_index, &info);
remap.msi_data = info.pmsi_data;
remap.msi_addr = info.pmsi_addr;
if (copy_to_gpa(vm, &remap, param, sizeof(remap)) != 0) {
pr_err("%s: Unable copy param to vm\n", __func__);
return -1;
}
return ret;
}
/**
*@pre Pointer vm shall point to VM0
*/
@@ -714,6 +673,11 @@ int32_t hcall_set_ptdev_intr_info(struct vm *vm, uint16_t vmid, uint64_t param)
return -1;
}
/* Inform vPCI about the interrupt info changes */
#ifndef CONFIG_PARTITION_MODE
vpci_set_ptdev_intr_info(target_vm, irq.virt_bdf, irq.phys_bdf);
#endif
if (irq.type == IRQ_INTX) {
ret = ptdev_add_intx_remapping(target_vm, irq.is.intx.virt_pin,
irq.is.intx.phys_pin, irq.is.intx.pic_pin);
@@ -755,6 +719,15 @@ hcall_reset_ptdev_intr_info(struct vm *vm, uint16_t vmid, uint64_t param)
irq.is.intx.virt_pin,
irq.is.intx.pic_pin);
} else if ((irq.type == IRQ_MSI) || (irq.type == IRQ_MSIX)) {
/*
 * Inform vPCI about the interrupt info changes
 * TODO: Need to add bdf info for IRQ_INTX type in devicemodel
 */
#ifndef CONFIG_PARTITION_MODE
vpci_reset_ptdev_intr_info(target_vm, irq.virt_bdf, irq.phys_bdf);
#endif
ptdev_remove_msix_remapping(target_vm,
irq.virt_bdf,
irq.is.msix.vector_cnt);


@@ -200,3 +200,39 @@ struct vpci_ops sharing_mode_vpci_ops = {
.cfgread = sharing_mode_cfgread,
.cfgwrite = sharing_mode_cfgwrite,
};
void vpci_set_ptdev_intr_info(struct vm *target_vm, uint16_t vbdf, uint16_t pbdf)
{
struct pci_vdev *vdev;
vdev = sharing_mode_find_vdev((union pci_bdf)pbdf);
if (vdev == NULL) {
pr_err("%s, can't find PCI device for vm%d, vbdf (0x%x) pbdf (0x%x)", __func__,
target_vm->vm_id, vbdf, pbdf);
return;
}
/* UOS may do BDF mapping */
vdev->vpci = &target_vm->vpci;
vdev->vbdf.value = vbdf;
vdev->pdev.bdf.value = pbdf;
}
void vpci_reset_ptdev_intr_info(struct vm *target_vm, uint16_t vbdf, uint16_t pbdf)
{
struct pci_vdev *vdev;
struct vm *vm;
vdev = sharing_mode_find_vdev((union pci_bdf)pbdf);
if (vdev == NULL) {
pr_err("%s, can't find PCI device for vm%d, vbdf (0x%x) pbdf (0x%x)", __func__,
target_vm->vm_id, vbdf, pbdf);
return;
}
/* Return this PCI device to SOS */
if (vdev->vpci->vm == target_vm) {
vm = get_vm_from_vmid(0U);
vdev->vpci = &vm->vpci;
}
}


@@ -250,22 +250,6 @@ int32_t hcall_set_vm_memory_regions(struct vm *vm, uint64_t param);
*/
int32_t hcall_write_protect_page(struct vm *vm, uint16_t vmid, uint64_t wp_gpa);
/**
* @brief remap PCI MSI interrupt
*
* Remap a PCI MSI interrupt from a VM's virtual vector to native vector.
* The function will return -1 if the target VM does not exist.
*
* @param vm Pointer to VM data structure
* @param vmid ID of the VM
* @param param guest physical address. This gpa points to
* struct acrn_vm_pci_msix_remap
*
* @pre Pointer vm shall point to VM0
* @return 0 on success, non-zero on error.
*/
int32_t hcall_remap_pci_msix(struct vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief translate guest physical address to host physical address
*


@@ -144,5 +144,7 @@ extern struct pci_vdev_ops pci_ops_vdev_pt;
void vpci_init(struct vm *vm);
void vpci_cleanup(struct vm *vm);
void vpci_set_ptdev_intr_info(struct vm *target_vm, uint16_t vbdf, uint16_t pbdf);
void vpci_reset_ptdev_intr_info(struct vm *target_vm, uint16_t vbdf, uint16_t pbdf);
#endif /* VPCI_H_ */