Mirror of https://github.com/projectacrn/acrn-hypervisor.git
hv: vpci: refine vpci deinit
The existing code handles each VM type separately when de-initializing the vPCI of a VM. This is not necessary. This patch uses one common flow for all VMs: we first deassign each vdev from its (current) user, then give it back to its parent user. Deassigning the vdev from the (current) user also de-initializes the vMSI/vMSI-X remapping and the vMSI/vMSI-X data structures.

Tracked-On: #4550
Signed-off-by: Li Fei1 <fei1.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@Intel.com>
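For orientation before reading the hunks, here is a condensed sketch of the per-device flow described above. The identifiers come from the diff below, but the standalone helper and its name are illustrative only (and assume ACRN's internal headers are in scope), not part of the patch:

```c
/* Sketch of the common deinit flow; identifiers mirror the diff below,
 * but this helper itself is illustrative, not ACRN code. */
static void deassign_and_return_to_parent(struct pci_vdev *vdev)
{
	struct pci_vdev *parent_vdev = vdev->parent_user;

	/* Deassign from the current user: deinit_vdev() also tears down
	 * the vMSI/vMSI-X remapping and zeroes the vMSI/vMSI-X state
	 * (see the deinit_vmsi()/deinit_vmsix() hunks below). */
	vdev->vdev_ops->deinit_vdev(vdev);

	/* Give the device back to its parent user, if it has one. */
	if (parent_vdev != NULL) {
		spinlock_obtain(&parent_vdev->vpci->lock);
		parent_vdev->vdev_ops->init_vdev(parent_vdev);
		spinlock_release(&parent_vdev->vpci->lock);
	}
}
```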
parent 15e3062631
commit f9d26a80ed
@@ -127,10 +127,11 @@ void write_vmsi_cfg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val)
  * @pre vdev->vpci != NULL
  * @pre vdev->vpci->vm != NULL
  */
-void deinit_vmsi(const struct pci_vdev *vdev)
+void deinit_vmsi(struct pci_vdev *vdev)
 {
 	if (has_msi_cap(vdev)) {
 		ptirq_remove_msix_remapping(vpci2vm(vdev->vpci), vdev->bdf.value, 1U);
+		(void)memset((void *)&vdev->msi, 0U, sizeof(struct pci_msi));
 	}
 }
@@ -262,11 +262,12 @@ void init_vmsix(struct pci_vdev *vdev)
  * @pre vdev->vpci != NULL
  * @pre vdev->vpci->vm != NULL
  */
-void deinit_vmsix(const struct pci_vdev *vdev)
+void deinit_vmsix(struct pci_vdev *vdev)
 {
 	if (has_msix_cap(vdev)) {
 		if (vdev->msix.table_count != 0U) {
 			ptirq_remove_msix_remapping(vpci2vm(vdev->vpci), vdev->bdf.value, vdev->msix.table_count);
+			(void)memset((void *)&vdev->msix, 0U, sizeof(struct pci_msix));
 		}
 	}
 }
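Both hunks above recover the owning VM through vpci2vm(), which sits outside this diff. A minimal sketch, assuming struct acrn_vpci is embedded in struct acrn_vm as a member named vpci (container_of-style arithmetic; not copied from the tree):

```c
#include <stddef.h>
#include <stdint.h>

/* Sketch only: recover the containing acrn_vm from its embedded vpci.
 * Assumes struct acrn_vm has a member "struct acrn_vpci vpci". */
static inline struct acrn_vm *vpci2vm(const struct acrn_vpci *vpci)
{
	return (struct acrn_vm *)((uintptr_t)vpci - offsetof(struct acrn_vm, vpci));
}
```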
@@ -39,8 +39,6 @@
 #include "pci_dev.h"

 static void vpci_init_vdevs(struct acrn_vm *vm);
-static void deinit_prelaunched_vm_vpci(struct acrn_vm *vm);
-static void deinit_postlaunched_vm_vpci(struct acrn_vm *vm);
 static int32_t vpci_read_cfg(struct acrn_vpci *vpci, union pci_bdf bdf, uint32_t offset, uint32_t bytes, uint32_t *val);
 static int32_t vpci_write_cfg(struct acrn_vpci *vpci, union pci_bdf bdf, uint32_t offset, uint32_t bytes, uint32_t val);
 static struct pci_vdev *find_available_vdev(struct acrn_vpci *vpci, union pci_bdf bdf);
@@ -272,23 +270,24 @@ void init_vpci(struct acrn_vm *vm)
  */
 void deinit_vpci(struct acrn_vm *vm)
 {
-	struct acrn_vm_config *vm_config;
+	struct pci_vdev *vdev, *parent_vdev;
+	uint32_t i;

-	vm_config = get_vm_config(vm->vm_id);
-	switch (vm_config->load_order) {
-	case PRE_LAUNCHED_VM:
-	case SOS_VM:
-		/* deinit function for both SOS and pre-launched VMs (consider sos also as pre-launched) */
-		deinit_prelaunched_vm_vpci(vm);
-		break;
-
-	case POST_LAUNCHED_VM:
-		deinit_postlaunched_vm_vpci(vm);
-		break;
-
-	default:
-		/* Unsupported VM type - Do nothing */
-		break;
+	for (i = 0U; i < vm->vpci.pci_vdev_cnt; i++) {
+		vdev = (struct pci_vdev *) &(vm->vpci.pci_vdevs[i]);
+
+		/* Only deinit the VM's own devices */
+		if (vdev->user == vdev) {
+			parent_vdev = vdev->parent_user;
+
+			vdev->vdev_ops->deinit_vdev(vdev);
+
+			if (parent_vdev != NULL) {
+				spinlock_obtain(&parent_vdev->vpci->lock);
+				parent_vdev->vdev_ops->init_vdev(parent_vdev);
+				spinlock_release(&parent_vdev->vpci->lock);
+			}
+		}
 	}

 	ptdev_release_all_entries(vm);
@@ -658,76 +657,6 @@ static void vpci_init_vdevs(struct acrn_vm *vm)
 	}
 }

-/**
- * @pre vm != NULL
- * @pre vm->vpci.pci_vdev_cnt <= CONFIG_MAX_PCI_DEV_NUM
- * @pre is_sos_vm(vm) || is_prelaunched_vm(vm)
- */
-static void deinit_prelaunched_vm_vpci(struct acrn_vm *vm)
-{
-	struct pci_vdev *vdev;
-	uint32_t i;
-
-	for (i = 0U; i < vm->vpci.pci_vdev_cnt; i++) {
-		vdev = (struct pci_vdev *) &(vm->vpci.pci_vdevs[i]);
-
-		/* Only deinit the VM's own devices */
-		if (vdev->user == vdev) {
-			vdev->vdev_ops->deinit_vdev(vdev);
-		}
-	}
-}
-
-/**
- * @pre vm != NULL && Pointer vm shall point to SOS_VM
- * @pre vm->vpci.pci_vdev_cnt <= CONFIG_MAX_PCI_DEV_NUM
- * @pre is_postlaunched_vm(vm) == true
- */
-static void deinit_postlaunched_vm_vpci(struct acrn_vm *vm)
-{
-	struct acrn_vm *sos_vm;
-	uint32_t i;
-	struct pci_vdev *vdev, *target_vdev;
-	int32_t ret;
-	/* PCI resources
-	 * 1) IOMMU domain switch
-	 * 2) Relese UOS MSI host IRQ/IRTE
-	 * 3) Update vdev info in SOS vdev
-	 * Cleanup mentioned above is taken care when DM releases UOS resources
-	 * during a UOS reboot or shutdown
-	 * In the following cases, where DM does not get chance to cleanup
-	 * 1) DM crash/segfault
-	 * 2) SOS triple fault/hang
-	 * 3) SOS reboot before shutting down POST_LAUNCHED_VMs
-	 * ACRN must cleanup
-	 */
-	sos_vm = get_sos_vm();
-	spinlock_obtain(&sos_vm->vpci.lock);
-	for (i = 0U; i < sos_vm->vpci.pci_vdev_cnt; i++) {
-		vdev = (struct pci_vdev *)&(sos_vm->vpci.pci_vdevs[i]);
-
-		/* Only deinit the VM's own devices */
-		if (vdev->user == vdev) {
-			spinlock_obtain(&vm->vpci.lock);
-			target_vdev = vdev->user;
-			ret = move_pt_device(vm->iommu, sos_vm->iommu, (uint8_t)target_vdev->pdev->bdf.bits.b,
-				(uint8_t)(target_vdev->pdev->bdf.value & 0xFFU));
-			if (ret != 0) {
-				panic("failed to assign iommu device!");
-			}
-
-			deinit_vmsi(target_vdev);
-
-			deinit_vmsix(target_vdev);
-
-			/* Move vdev pointers back to SOS*/
-			vdev->user = NULL;
-			spinlock_release(&vm->vpci.lock);
-		}
-	}
-	spinlock_release(&sos_vm->vpci.lock);
-}
-
 /**
  * @brief assign a PCI device from SOS to target post-launched VM.
  *
@@ -765,32 +694,31 @@ int32_t vpci_assign_pcidev(struct acrn_vm *tgt_vm, struct acrn_assign_pcidev *pcidev)
 		pdev_restore_bar(vdev_in_sos->pdev);
 	}

-	remove_vdev_pt_iommu_domain(vdev_in_sos);
-	vpci = &(tgt_vm->vpci);
-
-	spinlock_obtain(&tgt_vm->vpci.lock);
-	vdev = vpci_init_vdev(vpci, vdev_in_sos->pci_dev_config, vdev_in_sos->phyfun);
-	pci_vdev_write_vcfg(vdev, PCIR_INTERRUPT_LINE, 1U, pcidev->intr_line);
-	pci_vdev_write_vcfg(vdev, PCIR_INTERRUPT_PIN, 1U, pcidev->intr_pin);
-	for (idx = 0U; idx < vdev->nr_bars; idx++) {
-		/* VF is assigned to a UOS */
-		if (vdev->phyfun != NULL) {
-			vdev->vbars[idx] = vdev_in_sos->vbars[idx];
-			if (has_msix_cap(vdev) && (idx == vdev->msix.table_bar)) {
-				vdev->msix.mmio_hpa = vdev->vbars[idx].base_hpa;
-				vdev->msix.mmio_size = vdev->vbars[idx].size;
-			}
-		}
-		pci_vdev_write_vbar(vdev, idx, pcidev->bar[idx]);
-	}
-
-	vdev->flags |= pcidev->type;
-	vdev->bdf.value = pcidev->virt_bdf;
-	vdev->parent_user = vdev_in_sos;
-	spinlock_release(&tgt_vm->vpci.lock);
-	vdev_in_sos->user = vdev;
+	if (ret == 0) {
+		vpci = &(tgt_vm->vpci);
+		vdev_in_sos->vdev_ops->deinit_vdev(vdev_in_sos);
+
+		spinlock_obtain(&tgt_vm->vpci.lock);
+		vdev = vpci_init_vdev(vpci, vdev_in_sos->pci_dev_config, vdev_in_sos->phyfun);
+		pci_vdev_write_vcfg(vdev, PCIR_INTERRUPT_LINE, 1U, pcidev->intr_line);
+		pci_vdev_write_vcfg(vdev, PCIR_INTERRUPT_PIN, 1U, pcidev->intr_pin);
+		for (idx = 0U; idx < vdev->nr_bars; idx++) {
+			/* VF is assigned to a UOS */
+			if (vdev->phyfun != NULL) {
+				vdev->vbars[idx] = vdev_in_sos->vbars[idx];
+				if (has_msix_cap(vdev) && (idx == vdev->msix.table_bar)) {
+					vdev->msix.mmio_hpa = vdev->vbars[idx].base_hpa;
+					vdev->msix.mmio_size = vdev->vbars[idx].size;
+				}
+			}
+			pci_vdev_write_vbar(vdev, idx, pcidev->bar[idx]);
+		}
+
+		vdev->flags |= pcidev->type;
+		vdev->bdf.value = pcidev->virt_bdf;
+		vdev->parent_user = vdev_in_sos;
+		spinlock_release(&tgt_vm->vpci.lock);
+		vdev_in_sos->user = vdev;
 	} else {
 		pr_fatal("%s, can't find PCI device %x:%x.%x for vm[%d] %x:%x.%x\n", __func__,
 			pcidev->phys_bdf >> 8U, (pcidev->phys_bdf >> 3U) & 0x1fU, pcidev->phys_bdf & 0x7U,
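The assign path above wires up the ownership chain that the new deinit code walks: the SOS-side vdev's user points at the UOS-side vdev, the UOS-side vdev is its own user, and its parent_user points back. A small self-contained model of that invariant (toy types, not ACRN code; only the field names user and parent_user mirror struct pci_vdev):

```c
#include <assert.h>
#include <stddef.h>

/* Toy model of the user/parent_user chain set up by vpci_assign_pcidev(). */
struct toy_vdev {
	struct toy_vdev *user;		/* who currently uses the device */
	struct toy_vdev *parent_user;	/* whom to hand it back to */
};

int main(void)
{
	/* Unassigned: the SOS vdev is its own user. */
	struct toy_vdev sos_vdev = { .user = &sos_vdev, .parent_user = NULL };
	struct toy_vdev uos_vdev = { .user = NULL, .parent_user = NULL };

	/* After assignment (mirrors the diff above):
	 *   vdev->parent_user = vdev_in_sos;
	 *   vdev_in_sos->user = vdev;        */
	uos_vdev.user = &uos_vdev;
	uos_vdev.parent_user = &sos_vdev;
	sos_vdev.user = &uos_vdev;

	/* deinit_vpci() only touches vdevs that are their own user, so it
	 * picks the UOS-side instance and hands it back via parent_user. */
	assert(uos_vdev.user == &uos_vdev);
	assert(sos_vdev.user != &sos_vdev);
	return 0;
}
```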
@@ -812,34 +740,22 @@ int32_t vpci_assign_pcidev(struct acrn_vm *tgt_vm, struct acrn_assign_pcidev *pcidev)
 int32_t vpci_deassign_pcidev(struct acrn_vm *tgt_vm, struct acrn_assign_pcidev *pcidev)
 {
 	int32_t ret = 0;
-	struct pci_vdev *vdev_in_sos, *vdev;
+	struct pci_vdev *parent_vdev, *vdev;
 	union pci_bdf bdf;
-	struct acrn_vm *sos_vm;

-	bdf.value = pcidev->phys_bdf;
-	sos_vm = get_sos_vm();
-	spinlock_obtain(&sos_vm->vpci.lock);
-	vdev_in_sos = pci_find_vdev(&sos_vm->vpci, bdf);
-	if ((vdev_in_sos != NULL) && (vdev_in_sos->user != NULL) &&
-			(vdev_in_sos->user->vpci == &tgt_vm->vpci) && (vdev_in_sos->pdev != NULL)) {
-		vdev = vdev_in_sos->user;
-
-		if (vdev != NULL) {
-			ret = move_pt_device(tgt_vm->iommu, sos_vm->iommu, (uint8_t)(pcidev->phys_bdf >> 8U),
-					(uint8_t)(pcidev->phys_bdf & 0xffU));
-			if (ret != 0) {
-				panic("failed to assign iommu device!");
-			}
-
-			deinit_vmsi(vdev);
-
-			deinit_vmsix(vdev);
-
-			vdev_in_sos->user = NULL;
-		}
+	bdf.value = pcidev->virt_bdf;
+	vdev = pci_find_vdev(&tgt_vm->vpci, bdf);
+	if ((vdev != NULL) && (vdev->user == vdev) && (vdev->pdev != NULL) &&
+			(vdev->pdev->bdf.value == pcidev->phys_bdf)) {
+		parent_vdev = vdev->parent_user;
+
+		spinlock_obtain(&tgt_vm->vpci.lock);
+		vdev->vdev_ops->deinit_vdev(vdev);
+
+		if (parent_vdev != NULL) {
+			spinlock_obtain(&parent_vdev->vpci->lock);
+			parent_vdev->vdev_ops->init_vdev(parent_vdev);
+			spinlock_release(&parent_vdev->vpci->lock);
+		}
+		spinlock_release(&tgt_vm->vpci.lock);
 	} else {
 		pr_fatal("%s, can't find PCI device %x:%x.%x for vm[%d] %x:%x.%x\n", __func__,
 			pcidev->phys_bdf >> 8U, (pcidev->phys_bdf >> 3U) & 0x1fU, pcidev->phys_bdf & 0x7U,
@@ -847,7 +763,6 @@ int32_t vpci_deassign_pcidev(struct acrn_vm *tgt_vm, struct acrn_assign_pcidev *pcidev)
 			pcidev->virt_bdf >> 8U, (pcidev->virt_bdf >> 3U) & 0x1fU, pcidev->virt_bdf & 0x7U);
 		ret = -ENODEV;
 	}
-	spinlock_release(&sos_vm->vpci.lock);

 	return ret;
 }
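A side note on the pr_fatal() formatting in both error paths: a 16-bit BDF packs the bus in bits 15:8, the device in bits 7:3, and the function in bits 2:0, which is exactly what the shift/mask expressions decode. A standalone example with a hypothetical BDF value:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t bdf = 0x00c8U;	/* hypothetical example: bus 0, dev 0x19, func 0 */

	/* Same decoding as the pr_fatal() calls in the diff above. */
	unsigned int bus = (bdf >> 8U) & 0xffU;
	unsigned int dev = (bdf >> 3U) & 0x1fU;
	unsigned int fun = bdf & 0x7U;

	printf("%x:%x.%x\n", bus, dev, fun);	/* prints "0:19.0" */
	return 0;
}
```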
@@ -120,13 +120,13 @@ void vdev_pt_map_msix(struct pci_vdev *vdev, bool hold_lock);
 void init_vmsi(struct pci_vdev *vdev);
 void read_vmsi_cfg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val);
 void write_vmsi_cfg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
-void deinit_vmsi(const struct pci_vdev *vdev);
+void deinit_vmsi(struct pci_vdev *vdev);

 void init_vmsix(struct pci_vdev *vdev);
 int32_t vmsix_handle_table_mmio_access(struct io_request *io_req, void *handler_private_data);
 void read_vmsix_cfg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val);
 void write_vmsix_cfg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
-void deinit_vmsix(const struct pci_vdev *vdev);
+void deinit_vmsix(struct pci_vdev *vdev);

 void init_vsriov(struct pci_vdev *vdev);
 void read_sriov_cap_reg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val);
@@ -96,7 +96,7 @@ struct pci_vdev_ops {
 };

 struct pci_vdev {
-	const struct acrn_vpci *vpci;
+	struct acrn_vpci *vpci;
 	/* The bus/device/function triple of the virtual PCI device. */
 	union pci_bdf bdf;
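Why the const qualifiers disappear in the last two hunks: deinit_vmsi()/deinit_vmsix() now memset state reached through vdev, and deinit_vpci() re-initializes the parent through vdev->vpci, so both the prototypes and the back-pointer must be mutable. A minimal illustration with toy types, not ACRN code:

```c
/* Toy illustration of the const change. */
struct toy_msi {
	int vector;
};

static void toy_deinit(struct toy_msi *msi)	/* was: const struct toy_msi * */
{
	/* deinit now writes through the pointer, so the parameter can no
	 * longer be const-qualified. */
	msi->vector = 0;
}
```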