hv: vPCI: remove unused passthrough PCI device code

Now that passthrough PCI device handling has been split out of the DM and into the HV, we can
remove all of the passthrough PCI device code that is no longer used.
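
For orientation, assignment of a passthrough device to a post-launched VM now flows through the
HC_ASSIGN_PCIDEV hypercall and vpci_assign_pcidev(), which this commit keeps, instead of the
HC_ASSIGN_PTDEV / HC_DEASSIGN_PTDEV hypercalls deleted below. The following is only a hedged
sketch of that flow; the helper name assign_pcidev_sketch and the exact layout of
struct acrn_assign_pcidev are illustrative assumptions, not code from this commit.

/* Rough sketch, not the hypervisor's actual implementation: the DM hands over an
 * acrn_assign_pcidev descriptor and the hypervisor performs the assignment itself.
 */
static int32_t assign_pcidev_sketch(struct acrn_vm *sos_vm, uint16_t vmid, uint64_t param_gpa)
{
    struct acrn_assign_pcidev pcidev;   /* filled in by the DM; layout not shown in this diff */
    struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
    int32_t ret = -EINVAL;

    if (!is_poweroff_vm(target_vm) && is_postlaunched_vm(target_vm) &&
        (copy_from_gpa(sos_vm, &pcidev, param_gpa, sizeof(pcidev)) == 0)) {
        /* The vdev lookup, ownership transfer and interrupt remapping set-up that the
         * removed code below used to drive from the DM now happen inside the vPCI layer.
         */
        ret = vpci_assign_pcidev(target_vm, &pcidev);
    }

    return ret;
}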

Tracked-On: #4371
Signed-off-by: Li Fei1 <fei1.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Author: Li Fei1 <fei1.li@intel.com>
Date: 2020-01-17 18:31:18 +08:00
Committer: wenlingz
Parent: 9fa6eff3c5
Commit: e8479f84cd
9 changed files with 39 additions and 280 deletions


@@ -251,9 +251,6 @@ for UOS.
 .. doxygenfunction:: ptirq_remove_intx_remapping
    :project: Project ACRN

-.. doxygenfunction:: ptirq_add_msix_remapping
-   :project: Project ACRN
-
 .. doxygenfunction:: ptirq_remove_msix_remapping
    :project: Project ACRN


@@ -610,10 +610,7 @@ int32_t ptirq_prepare_msix_remap(struct acrn_vm *vm, uint16_t virt_bdf, uint16_t
     union pci_bdf vbdf;

     /*
-     * Device Model should pre-hold the mapping entries by calling
-     * ptirq_add_msix_remapping for UOS.
-     *
-     * For SOS(sos_vm), it adds the mapping entries at runtime, if the
+     * adds the mapping entries at runtime, if the
      * entry already be held by others, return error.
      */
     spinlock_obtain(&ptdev_lock);
@@ -842,32 +839,3 @@ void ptirq_remove_msix_remapping(const struct acrn_vm *vm, uint16_t virt_bdf,
spinlock_release(&ptdev_lock);
}
}
/* except sos_vm, Device Model should call this function to pre-hold ptdev msi
* entries:
* - the entry is identified by phys_bdf:msi_idx:
* one entry vs. one phys_bdf:msi_idx
*/
int32_t ptirq_add_msix_remapping(struct acrn_vm *vm, uint16_t virt_bdf,
uint16_t phys_bdf, uint32_t vector_count)
{
struct ptirq_remapping_info *entry;
uint32_t i;
uint32_t vector_added = 0U;
for (i = 0U; i < vector_count; i++) {
spinlock_obtain(&ptdev_lock);
entry = add_msix_remapping(vm, virt_bdf, phys_bdf, i);
spinlock_release(&ptdev_lock);
if (entry == NULL) {
break;
}
vector_added++;
}
if (vector_added != vector_count) {
ptirq_remove_msix_remapping(vm, virt_bdf, vector_added);
}
return (vector_added == vector_count) ? 0 : -ENODEV;
}


@@ -167,20 +167,6 @@ static int32_t dispatch_sos_hypercall(const struct acrn_vcpu *vcpu)
}
break;
case HC_ASSIGN_PTDEV:
/* param1: relative vmid to sos, vm_id: absolute vmid */
if (vmid_is_valid) {
ret = hcall_assign_ptdev(sos_vm, vm_id, param2);
}
break;
case HC_DEASSIGN_PTDEV:
/* param1: relative vmid to sos, vm_id: absolute vmid */
if (vmid_is_valid) {
ret = hcall_deassign_ptdev(sos_vm, vm_id, param2);
}
break;
case HC_ASSIGN_PCIDEV:
/* param1: relative vmid to sos, vm_id: absolute vmid */
if (vmid_is_valid) {


@@ -814,74 +814,6 @@ int32_t hcall_gpa_to_hpa(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
return ret;
}
/**
* @brief Assign one passthrough dev to VM.
*
* @param vm Pointer to VM data structure
* @param vmid ID of the VM
* @param param the physical BDF of the assigning ptdev
*
* @pre Pointer vm shall point to SOS_VM
* @return 0 on success, non-zero on error.
*/
int32_t hcall_assign_ptdev(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret = 0;
union pci_bdf bdf;
struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
struct pci_vdev *vdev;
if (!is_poweroff_vm(target_vm) && is_postlaunched_vm(target_vm)) {
bdf.value = (uint16_t)param;
spinlock_obtain(&vm->vpci.lock);
vdev = pci_find_vdev(&vm->vpci, bdf);
if ((vdev == NULL) || (vdev->pdev == NULL)) {
pr_fatal("%s %x:%x.%x not found\n", __func__, bdf.bits.b, bdf.bits.d, bdf.bits.f);
ret = -EPERM;
} else {
/* ToDo: Each PT device must support one type reset */
if (!vdev->pdev->has_pm_reset && !vdev->pdev->has_flr && !vdev->pdev->has_af_flr) {
pr_fatal("%s %x:%x.%x not support FLR or not support PM reset\n",
__func__, bdf.bits.b, bdf.bits.d, bdf.bits.f);
}
}
spinlock_release(&vm->vpci.lock);
if (ret == 0) {
ret = move_pt_device(vm->iommu, target_vm->iommu, bdf.fields.bus, bdf.fields.devfun);
}
} else {
pr_err("%s, target vm is invalid\n", __func__);
ret = -EINVAL;
}
return ret;
}
/**
* @brief Deassign one passthrough dev from VM.
*
* @param vm Pointer to VM data structure
* @param vmid ID of the VM
* @param param the physical BDF of the deassigning ptdev
*
* @pre Pointer vm shall point to SOS_VM
* @return 0 on success, non-zero on error.
*/
int32_t hcall_deassign_ptdev(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
{
int32_t ret = -1;
union pci_bdf bdf;
struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
if (!is_poweroff_vm(target_vm) && is_postlaunched_vm(target_vm)) {
bdf.value = (uint16_t)param;
ret = move_pt_device(target_vm->iommu, vm->iommu, bdf.fields.bus, bdf.fields.devfun);
}
return ret;
}
/**
* @brief Assign one PCI dev to a VM.
*
@@ -964,20 +896,20 @@ int32_t hcall_set_ptdev_intr_info(struct acrn_vm *vm, uint16_t vmid, uint64_t pa
         if (copy_from_gpa(vm, &irq, param, sizeof(irq)) != 0) {
             pr_err("%s: Unable copy param to vm\n", __func__);
         } else {
-            /* Inform vPCI about the interupt info changes */
-            vpci_set_ptdev_intr_info(target_vm, irq.virt_bdf, irq.phys_bdf);
-
             if (irq.type == IRQ_INTX) {
-                ret = ptirq_add_intx_remapping(target_vm, irq.is.intx.virt_pin,
-                        irq.is.intx.phys_pin, irq.is.intx.pic_pin);
-            } else if (((irq.type == IRQ_MSI) || (irq.type == IRQ_MSIX)) &&
-                    (irq.is.msix.vector_cnt <= CONFIG_MAX_MSIX_TABLE_NUM)) {
-                ret = ptirq_add_msix_remapping(target_vm,
-                        irq.virt_bdf, irq.phys_bdf,
-                        irq.is.msix.vector_cnt);
+                struct pci_vdev *vdev;
+                union pci_bdf bdf = {.value = irq.virt_bdf};
+                struct acrn_vpci *vpci = &target_vm->vpci;
+
+                spinlock_obtain(&vpci->lock);
+                vdev = pci_find_vdev(vpci, bdf);
+                spinlock_release(&vpci->lock);
+                if ((vdev != NULL) && (vdev->pdev->bdf.value == irq.phys_bdf)) {
+                    ret = ptirq_add_intx_remapping(target_vm, irq.intx.virt_pin,
+                            irq.intx.phys_pin, irq.intx.pic_pin);
+                }
             } else {
-                pr_err("%s: Invalid irq type: %u or MSIX vector count: %u\n",
-                    __func__, irq.type, irq.is.msix.vector_cnt);
+                pr_err("%s: Invalid irq type: %u\n", __func__, irq.type);
             }
         }
     }
@@ -1006,28 +938,22 @@ hcall_reset_ptdev_intr_info(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
         if (copy_from_gpa(vm, &irq, param, sizeof(irq)) != 0) {
             pr_err("%s: Unable copy param to vm\n", __func__);
-        } else if (irq.type == IRQ_INTX) {
-            vpci_reset_ptdev_intr_info(target_vm, irq.virt_bdf, irq.phys_bdf);
-            ptirq_remove_intx_remapping(target_vm,
-                    irq.is.intx.virt_pin,
-                    irq.is.intx.pic_pin);
-            ret = 0;
-        } else if (((irq.type == IRQ_MSI) || (irq.type == IRQ_MSIX)) &&
-                (irq.is.msix.vector_cnt <= CONFIG_MAX_MSIX_TABLE_NUM)) {
-            /*
-             * Inform vPCI about the interupt info changes
-             * TODO: Need to add bdf info for IRQ_INTX type in devicemodel
-             */
-            vpci_reset_ptdev_intr_info(target_vm, irq.virt_bdf, irq.phys_bdf);
-            ptirq_remove_msix_remapping(target_vm,
-                    irq.virt_bdf,
-                    irq.is.msix.vector_cnt);
-            ret = 0;
         } else {
-            pr_err("%s: Invalid irq type: %u or MSIX vector count: %u\n",
-                __func__, irq.type, irq.is.msix.vector_cnt);
+            if (irq.type == IRQ_INTX) {
+                struct pci_vdev *vdev;
+                union pci_bdf bdf = {.value = irq.virt_bdf};
+                struct acrn_vpci *vpci = &target_vm->vpci;
+
+                spinlock_obtain(&vpci->lock);
+                vdev = pci_find_vdev(vpci, bdf);
+                spinlock_release(&vpci->lock);
+                if ((vdev != NULL) && (vdev->pdev->bdf.value == irq.phys_bdf)) {
+                    ptirq_remove_intx_remapping(target_vm, irq.intx.virt_pin, irq.intx.pic_pin);
+                    ret = 0;
+                }
+            } else {
+                pr_err("%s: Invalid irq type: %u\n", __func__, irq.type);
+            }
         }
     }


@@ -585,70 +585,6 @@ static void deinit_postlaunched_vm_vpci(struct acrn_vm *vm)
spinlock_release(&sos_vm->vpci.lock);
}
/**
* @pre target_vm != NULL && Pointer target_vm shall point to SOS_VM
*/
void vpci_set_ptdev_intr_info(struct acrn_vm *target_vm, uint16_t vbdf, uint16_t pbdf)
{
struct pci_vdev *vdev, *target_vdev;
struct acrn_vpci *target_vpci;
union pci_bdf bdf;
struct acrn_vm *sos_vm;
bdf.value = pbdf;
sos_vm = get_sos_vm();
spinlock_obtain(&sos_vm->vpci.lock);
vdev = pci_find_vdev(&sos_vm->vpci, bdf);
if ((vdev == NULL) || (vdev->pdev == NULL)) {
pr_err("%s, can't find PCI device for vm%d, vbdf (0x%x) pbdf (0x%x)", __func__,
target_vm->vm_id, vbdf, pbdf);
} else {
if (vdev->vpci->vm == sos_vm) {
spinlock_obtain(&target_vm->vpci.lock);
target_vpci = &(target_vm->vpci);
vdev->vpci = target_vpci;
target_vdev = &target_vpci->pci_vdevs[target_vpci->pci_vdev_cnt];
target_vpci->pci_vdev_cnt++;
(void)memcpy_s((void *)target_vdev, sizeof(struct pci_vdev),
(void *)vdev, sizeof(struct pci_vdev));
target_vdev->bdf.value = vbdf;
vdev->new_owner = target_vdev;
spinlock_release(&target_vm->vpci.lock);
}
}
spinlock_release(&sos_vm->vpci.lock);
}
/**
* @pre target_vm != NULL && Pointer target_vm shall point to SOS_VM
*/
void vpci_reset_ptdev_intr_info(struct acrn_vm *target_vm, uint16_t vbdf, uint16_t pbdf)
{
struct pci_vdev *vdev;
union pci_bdf bdf;
struct acrn_vm *sos_vm;
bdf.value = pbdf;
sos_vm = get_sos_vm();
spinlock_obtain(&sos_vm->vpci.lock);
vdev = pci_find_vdev(&sos_vm->vpci, bdf);
if (vdev == NULL) {
pr_err("%s, can't find PCI device for vm%d, vbdf (0x%x) pbdf (0x%x)", __func__,
target_vm->vm_id, vbdf, pbdf);
} else {
/* Return this PCI device to SOS */
if (vdev->vpci->vm == target_vm) {
spinlock_obtain(&target_vm->vpci.lock);
vdev->vpci = &sos_vm->vpci;
vdev->new_owner = NULL;
spinlock_release(&target_vm->vpci.lock);
}
}
spinlock_release(&sos_vm->vpci.lock);
}
/**
* @brief assign a PCI device from SOS to target post-launched VM.
*


@@ -124,27 +124,6 @@ int32_t ptirq_add_intx_remapping(struct acrn_vm *vm, uint32_t virt_pin, uint32_t
*/
void ptirq_remove_intx_remapping(struct acrn_vm *vm, uint32_t virt_pin, bool pic_pin);
/**
* @brief Add interrupt remapping entry/entries for MSI/MSI-x as pre-hold mapping.
*
* Add pre-hold mapping of the given number of vectors between the given physical and virtual BDF for the given vm.
* Except sos_vm, Device Model should call this function to pre-hold ptdev MSI/MSI-x.
* The entry is identified by phys_bdf:msi_idx, one entry vs. one phys_bdf:msi_idx.
*
* @param[in] vm pointer to acrn_vm
* @param[in] virt_bdf virtual bdf associated with the passthrough device
* @param[in] phys_bdf physical bdf associated with the passthrough device
* @param[in] vector_count number of vectors
*
* @return
* - 0: on success
* - \p -ENODEV: failed to add the remapping entry
*
* @pre vm != NULL
*
*/
int32_t ptirq_add_msix_remapping(struct acrn_vm *vm, uint16_t virt_bdf, uint16_t phys_bdf, uint32_t vector_count);
/**
* @brief Remove interrupt remapping entry/entries for MSI/MSI-x.
*


@@ -248,30 +248,6 @@ int32_t hcall_write_protect_page(struct acrn_vm *vm, uint16_t vmid, uint64_t wp_
*/
int32_t hcall_gpa_to_hpa(struct acrn_vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief Assign one passthrough dev to VM.
*
* @param vm Pointer to VM data structure
* @param vmid ID of the VM
* @param param the physical BDF of the assigning ptdev
*
* @pre Pointer vm shall point to SOS_VM
* @return 0 on success, non-zero on error.
*/
int32_t hcall_assign_ptdev(struct acrn_vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief Deassign one passthrough dev from VM.
*
* @param vm Pointer to VM data structure
* @param vmid ID of the VM
* @param param the physical BDF of the deassigning ptdev
*
* @pre Pointer vm shall point to SOS_VM
* @return 0 on success, non-zero on error.
*/
int32_t hcall_deassign_ptdev(struct acrn_vm *vm, uint16_t vmid, uint64_t param);
/**
* @brief Assign one PCI dev to VM.
*


@@ -132,8 +132,6 @@ extern const struct pci_vdev_ops vhostbridge_ops;
void vpci_init(struct acrn_vm *vm);
void vpci_cleanup(struct acrn_vm *vm);
struct pci_vdev *pci_find_vdev(struct acrn_vpci *vpci, union pci_bdf vbdf);
void vpci_set_ptdev_intr_info(struct acrn_vm *target_vm, uint16_t vbdf, uint16_t pbdf);
void vpci_reset_ptdev_intr_info(struct acrn_vm *target_vm, uint16_t vbdf, uint16_t pbdf);
struct acrn_assign_pcidev;
int32_t vpci_assign_pcidev(struct acrn_vm *tgt_vm, struct acrn_assign_pcidev *pcidev);
int32_t vpci_deassign_pcidev(struct acrn_vm *tgt_vm, struct acrn_assign_pcidev *pcidev);


@@ -249,28 +249,21 @@ struct hc_ptdev_irq {
     /** physical BDF of the ptdev */
     uint16_t phys_bdf;

-    union irq_source {
-        /** INTX remapping info */
-        struct intx_info {
-            /** virtual IOAPIC/PIC pin */
-            uint32_t virt_pin;
+    /** INTX remapping info */
+    struct intx_info {
+        /** virtual IOAPIC/PIC pin */
+        uint32_t virt_pin;

-            /** physical IOAPIC pin */
-            uint32_t phys_pin;
+        /** physical IOAPIC pin */
+        uint32_t phys_pin;

-            /** is virtual pin from PIC */
-            bool pic_pin;
+        /** is virtual pin from PIC */
+        bool pic_pin;

-            /** Reserved */
-            uint8_t reserved[3];
-        } intx;
+        /** Reserved */
+        uint8_t reserved[3];
+    } intx;

-        /** MSIx remapping info */
-        struct msix_info {
-            /** vector count of MSI/MSIX */
-            uint32_t vector_cnt;
-        } msix;
-    } is; /* irq source */
 } __aligned(8);
/**
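
With the irq_source union gone, struct hc_ptdev_irq only carries INTX information. Below is a
minimal sketch of how a caller would fill the trimmed structure; it assumes the usual type field
shown used by the hypercall handlers above, and the BDF and pin values are made up.

/* Illustrative values only; MSI/MSI-X vector counts no longer travel through
 * this structure, so only the INTX case is meaningful here.
 */
struct hc_ptdev_irq irq_info = {
    .type = IRQ_INTX,        /* the reworked hypercall handlers above reject any other type */
    .virt_bdf = 0x0010U,     /* virtual 00:02.0 (made-up) */
    .phys_bdf = 0x0010U,     /* physical 00:02.0 (made-up) */
    .intx = {
        .virt_pin = 4U,      /* virtual IOAPIC/PIC pin */
        .phys_pin = 20U,     /* physical IOAPIC pin */
        .pic_pin = false,    /* pin comes from the vIOAPIC, not the vPIC */
    },
};

Note that the hypervisor now looks up the vdev for virt_bdf itself and cross-checks phys_bdf
against it (see the hcall_set_ptdev_intr_info and hcall_reset_ptdev_intr_info hunks above),
instead of trusting the mapping that the removed vpci_set_ptdev_intr_info used to record.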