mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2025-06-18 19:57:31 +00:00)
hv: vpci: remove vm structure pointer from vpci
We can use container_of to get the vm structure pointer from the vpci pointer, so the vm structure pointer is no longer needed in the vpci structure.

Tracked-On: #4550
Signed-off-by: Li Fei1 <fei1.li@intel.com>
This commit is contained in:
parent a7768fdb6a
commit 0a7770cbb7
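For background on the pattern this commit relies on: container_of() recovers a pointer to an enclosing structure from a pointer to one of its embedded members, which is why the back-pointer in struct acrn_vpci becomes redundant. Below is a minimal, self-contained sketch of that idea using simplified stand-in structures (the real acrn_vm and acrn_vpci have many more fields, and ACRN ships its own container_of); it mirrors the vpci2vm() helper added in this commit.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative container_of; the ACRN hypervisor provides its own definition. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-ins for the real ACRN structures. */
struct acrn_vpci {
	uint32_t pci_vdev_cnt;
};

struct acrn_vm {
	uint16_t vm_id;
	struct acrn_vpci vpci;	/* vpci is embedded in acrn_vm, so container_of can recover the vm */
};

static inline struct acrn_vm *vpci2vm(const struct acrn_vpci *vpci)
{
	return container_of(vpci, struct acrn_vm, vpci);
}

int main(void)
{
	struct acrn_vm vm = { .vm_id = 3U, .vpci = { .pci_vdev_cnt = 0U } };

	/* Given only &vm.vpci, recover the enclosing acrn_vm without a stored back-pointer. */
	printf("vm_id = %u\n", (unsigned int)vpci2vm(&vm.vpci)->vm_id);
	return 0;
}

Because the address of an embedded member differs from the address of its container by a compile-time constant offset, the helper costs a single subtraction and removes the need to keep (and initialize) a vm field inside struct acrn_vpci.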
@@ -42,7 +42,6 @@ static void vdev_pt_unmap_msix(struct pci_vdev *vdev)
 	uint32_t i;
 	uint64_t addr_hi, addr_lo;
 	struct pci_msix *msix = &vdev->msix;
-	struct acrn_vm *vm = vdev->vpci->vm;
 
 	/* Mask all table entries */
 	for (i = 0U; i < msix->table_count; i++) {
@@ -56,7 +55,7 @@ static void vdev_pt_unmap_msix(struct pci_vdev *vdev)
 		addr_hi = addr_lo + (msix->table_count * MSIX_TABLE_ENTRY_SIZE);
 		addr_lo = round_page_down(addr_lo);
 		addr_hi = round_page_up(addr_hi);
-		unregister_mmio_emulation_handler(vm, addr_lo, addr_hi);
+		unregister_mmio_emulation_handler(vpci2vm(vdev->vpci), addr_lo, addr_hi);
 		msix->mmio_gpa = 0UL;
 	}
 }
@@ -71,10 +70,11 @@ void vdev_pt_map_msix(struct pci_vdev *vdev, bool hold_lock)
 	struct pci_vbar *vbar;
 	uint64_t addr_hi, addr_lo;
 	struct pci_msix *msix = &vdev->msix;
-	struct acrn_vm *vm = vdev->vpci->vm;
 
-	vbar = &vdev->vbars[vdev->msix.table_bar];
+	vbar = &vdev->vbars[msix->table_bar];
 	if (vbar->base_gpa != 0UL) {
+		struct acrn_vm *vm = vpci2vm(vdev->vpci);
+
 		addr_lo = vbar->base_gpa + msix->table_offset;
 		addr_hi = addr_lo + (msix->table_count * MSIX_TABLE_ENTRY_SIZE);
 		addr_lo = round_page_down(addr_lo);
@@ -93,12 +93,11 @@ void vdev_pt_map_msix(struct pci_vdev *vdev, bool hold_lock)
  */
 static void vdev_pt_unmap_mem_vbar(struct pci_vdev *vdev, uint32_t idx)
 {
-	struct pci_vbar *vbar;
-	struct acrn_vm *vm = vdev->vpci->vm;
-
-	vbar = &vdev->vbars[idx];
+	struct pci_vbar *vbar = &vdev->vbars[idx];
 
 	if (vbar->base_gpa != 0UL) {
+		struct acrn_vm *vm = vpci2vm(vdev->vpci);
+
 		ept_del_mr(vm, (uint64_t *)(vm->arch_vm.nworld_eptp),
 			vbar->base_gpa, /* GPA (old vbar) */
 			vbar->size);
@@ -116,12 +115,11 @@ static void vdev_pt_unmap_mem_vbar(struct pci_vdev *vdev, uint32_t idx)
  */
 static void vdev_pt_map_mem_vbar(struct pci_vdev *vdev, uint32_t idx)
 {
-	struct pci_vbar *vbar;
-	struct acrn_vm *vm = vdev->vpci->vm;
-
-	vbar = &vdev->vbars[idx];
+	struct pci_vbar *vbar = &vdev->vbars[idx];
 
 	if (vbar->base_gpa != 0UL) {
+		struct acrn_vm *vm = vpci2vm(vdev->vpci);
+
 		ept_add_mr(vm, (uint64_t *)(vm->arch_vm.nworld_eptp),
 			vbar->base_hpa, /* HPA (pbar) */
 			vbar->base_gpa, /* GPA (new vbar) */
@@ -142,11 +140,13 @@ static void vdev_pt_map_mem_vbar(struct pci_vdev *vdev, uint32_t idx)
  */
 static void vdev_pt_allow_io_vbar(struct pci_vdev *vdev, uint32_t idx)
 {
+	struct acrn_vm *vm = vpci2vm(vdev->vpci);
+
 	/* For SOS, all port IO access is allowed by default, so skip SOS here */
-	if (!is_sos_vm(vdev->vpci->vm)) {
+	if (!is_sos_vm(vm)) {
 		struct pci_vbar *vbar = &vdev->vbars[idx];
 		if (vbar->base_gpa != 0UL) {
-			allow_guest_pio_access(vdev->vpci->vm, (uint16_t)vbar->base_gpa, (uint32_t)(vbar->size));
+			allow_guest_pio_access(vm, (uint16_t)vbar->base_gpa, (uint32_t)(vbar->size));
 		}
 	}
 }
@@ -159,11 +159,13 @@ static void vdev_pt_allow_io_vbar(struct pci_vdev *vdev, uint32_t idx)
  */
 static void vdev_pt_deny_io_vbar(struct pci_vdev *vdev, uint32_t idx)
 {
+	struct acrn_vm *vm = vpci2vm(vdev->vpci);
+
 	/* For SOS, all port IO access is allowed by default, so skip SOS here */
-	if (!is_sos_vm(vdev->vpci->vm)) {
+	if (!is_sos_vm(vm)) {
 		struct pci_vbar *vbar = &vdev->vbars[idx];
 		if (vbar->base_gpa != 0UL) {
-			deny_guest_pio_access(vdev->vpci->vm, (uint16_t)(vbar->base_gpa), (uint32_t)(vbar->size));
+			deny_guest_pio_access(vm, (uint16_t)(vbar->base_gpa), (uint32_t)(vbar->size));
 		}
 
 	}
@@ -294,7 +296,7 @@ static void init_bars(struct pci_vdev *vdev, bool is_sriov_bar)
 			vbar->fixed = lo & (~mask);
 			vbar->size = (uint64_t)size32 & mask;
 
-			if (is_prelaunched_vm(vdev->vpci->vm)) {
+			if (is_prelaunched_vm(vpci2vm(vdev->vpci))) {
 				lo = (uint32_t)vdev->pci_dev_config->vbar_base[idx];
 			}
 
@@ -322,7 +324,7 @@ static void init_bars(struct pci_vdev *vdev, bool is_sriov_bar)
 			vbar->mask = size32;
 			vbar->type = PCIBAR_MEM64HI;
 
-			if (is_prelaunched_vm(vdev->vpci->vm)) {
+			if (is_prelaunched_vm(vpci2vm(vdev->vpci))) {
 				hi = (uint32_t)(vdev->pci_dev_config->vbar_base[idx - 1U] >> 32U);
 			}
 			/* if it is parsing SRIOV VF BARs, no need to write vdev bars */
@@ -380,7 +382,7 @@ void init_vdev_pt(struct pci_vdev *vdev, bool is_pf_vdev)
 	/* Initialize the vdev BARs except SRIOV VF, VF BARs are initialized directly from create_vf function */
 	if (vdev->phyfun == NULL) {
 		init_bars(vdev, is_pf_vdev);
-		if (is_prelaunched_vm(vdev->vpci->vm) && (!is_pf_vdev)) {
+		if (is_prelaunched_vm(vpci2vm(vdev->vpci)) && (!is_pf_vdev)) {
 			pci_command = (uint16_t)pci_pdev_read_cfg(vdev->pdev->bdf, PCIR_COMMAND, 2U);
 
 			/* Disable INTX */
@@ -388,7 +390,7 @@ void init_vdev_pt(struct pci_vdev *vdev, bool is_pf_vdev)
 			pci_pdev_write_cfg(vdev->pdev->bdf, PCIR_COMMAND, 2U, pci_command);
 		}
 	} else {
-		if (!is_own_device(vdev->phyfun->vpci->vm, vdev)) {
+		if (!is_own_device(vpci2vm(vdev->phyfun->vpci), vdev)) {
 			/* VF is assigned to a UOS */
 			uint32_t vid, did;
 
@@ -424,7 +426,7 @@ void init_vdev_pt(struct pci_vdev *vdev, bool is_pf_vdev)
 void deinit_vdev_pt(struct pci_vdev *vdev) {
 
 	/* Check if the vdev is an unassigned SR-IOV VF device */
-	if ((vdev->phyfun != NULL) && (is_own_device(vdev->phyfun->vpci->vm, vdev))) {
+	if ((vdev->phyfun != NULL) && (is_own_device(vpci2vm(vdev->phyfun->vpci), vdev))) {
 		uint32_t bar_idx;
 
 		/* Delete VF MMIO from EPT table since the VF physical device has gone */
@@ -113,7 +113,6 @@ static void pci_vdev_update_vbar_base(struct pci_vdev *vdev, uint32_t idx)
 	enum pci_bar_type type;
 	uint64_t base = 0UL;
 	uint32_t lo, hi, offset;
-	struct acrn_vm *vm = vdev->vpci->vm;
 
 	vbar = &vdev->vbars[idx];
 	offset = pci_bar_offset(idx);
@@ -137,7 +136,7 @@ static void pci_vdev_update_vbar_base(struct pci_vdev *vdev, uint32_t idx)
 		}
 	}
 
-	if ((base != 0UL) && !ept_is_mr_valid(vm, base, vdev->vbars[idx].size)) {
+	if ((base != 0UL) && !ept_is_mr_valid(vpci2vm(vdev->vpci), base, vdev->vbars[idx].size)) {
 		pr_fatal("%s, %x:%x.%x set invalid bar[%d] base: 0x%lx, size: 0x%lx\n", __func__,
 			vdev->bdf.bits.b, vdev->bdf.bits.d, vdev->bdf.bits.f, idx, base, vdev->vbars[idx].size);
 		/* If guest set a invalid GPA, ignore it temporarily */
@@ -64,7 +64,7 @@ static void remap_vmsi(const struct pci_vdev *vdev)
 {
 	struct ptirq_msi_info info = {};
 	union pci_bdf pbdf = vdev->pdev->bdf;
-	struct acrn_vm *vm = vdev->vpci->vm;
+	struct acrn_vm *vm = vpci2vm(vdev->vpci);
 	uint32_t capoff = vdev->msi.capoff;
 	uint32_t vmsi_msgdata, vmsi_addrlo, vmsi_addrhi = 0U;
 
@@ -130,7 +130,7 @@ void write_vmsi_cfg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint
 void deinit_vmsi(const struct pci_vdev *vdev)
 {
 	if (has_msi_cap(vdev)) {
-		ptirq_remove_msix_remapping(vdev->vpci->vm, vdev->bdf.value, 1U);
+		ptirq_remove_msix_remapping(vpci2vm(vdev->vpci), vdev->bdf.value, 1U);
 	}
 }
 
@@ -88,7 +88,7 @@ static void remap_one_vmsix_entry(const struct pci_vdev *vdev, uint32_t index)
 	info.vmsi_addr.full = vdev->msix.table_entries[index].addr;
 	info.vmsi_data.full = vdev->msix.table_entries[index].data;
 
-	ret = ptirq_prepare_msix_remap(vdev->vpci->vm, vdev->bdf.value, vdev->pdev->bdf.value, (uint16_t)index, &info);
+	ret = ptirq_prepare_msix_remap(vpci2vm(vdev->vpci), vdev->bdf.value, vdev->pdev->bdf.value, (uint16_t)index, &info);
 	if (ret == 0) {
 		/* Write the table entry to the physical structure */
 		pentry = get_msix_table_entry(vdev, index);
@@ -266,7 +266,7 @@ void deinit_vmsix(const struct pci_vdev *vdev)
 {
 	if (has_msix_cap(vdev)) {
 		if (vdev->msix.table_count != 0U) {
-			ptirq_remove_msix_remapping(vdev->vpci->vm, vdev->bdf.value, vdev->msix.table_count);
+			ptirq_remove_msix_remapping(vpci2vm(vdev->vpci), vdev->bdf.value, vdev->msix.table_count);
 		}
 	}
 }
@@ -242,7 +242,6 @@ void vpci_init(struct acrn_vm *vm)
 	struct acrn_vm_config *vm_config;
 	uint64_t pci_mmcfg_base;
 
-	vm->vpci.vm = vm;
 	vm->iommu = create_iommu_domain(vm->vm_id, hva2hpa(vm->arch_vm.nworld_eptp), 48U);
 	/* Build up vdev list for vm */
 	vpci_init_vdevs(vm);
@@ -302,7 +301,7 @@ void vpci_cleanup(struct acrn_vm *vm)
 static void assign_vdev_pt_iommu_domain(struct pci_vdev *vdev)
 {
 	int32_t ret;
-	struct acrn_vm *vm = vdev->vpci->vm;
+	struct acrn_vm *vm = vpci2vm(vdev->vpci);
 
 	ret = move_pt_device(NULL, vm->iommu, (uint8_t)vdev->pdev->bdf.bits.b,
 		(uint8_t)(vdev->pdev->bdf.value & 0xFFU));
@@ -320,7 +319,7 @@ static void assign_vdev_pt_iommu_domain(struct pci_vdev *vdev)
 static void remove_vdev_pt_iommu_domain(const struct pci_vdev *vdev)
 {
 	int32_t ret;
-	const struct acrn_vm *vm = vdev->vpci->vm;
+	const struct acrn_vm *vm = vpci2vm(vdev->vpci);
 
 	ret = move_pt_device(vm->iommu, NULL, (uint8_t)vdev->pdev->bdf.bits.b,
 		(uint8_t)(vdev->pdev->bdf.value & 0xFFU));
@@ -360,8 +359,8 @@ static struct pci_vdev *find_available_vdev(struct acrn_vpci *vpci, union pci_bd
 	/* In the case a device is assigned to a UOS and is not in a zombie state */
 	if ((vdev->new_owner != NULL) && (vdev->new_owner->vpci != NULL)) {
 		/* the SOS is able to access, if and only if the SOS has higher severity than the UOS. */
-		if (get_vm_severity(vpci->vm->vm_id) <
-				get_vm_severity(vdev->new_owner->vpci->vm->vm_id)) {
+		if (get_vm_severity(vpci2vm(vpci)->vm_id) <
+				get_vm_severity(vpci2vm(vdev->new_owner->vpci)->vm_id)) {
 			vdev = NULL;
 		}
 	} else {
@@ -554,7 +553,7 @@ static int32_t vpci_read_cfg(struct acrn_vpci *vpci, union pci_bdf bdf,
 	if (vdev != NULL) {
 		ret = vdev->vdev_ops->read_vdev_cfg(vdev, offset, bytes, val);
 	} else {
-		if (is_postlaunched_vm(vpci->vm)) {
+		if (is_postlaunched_vm(vpci2vm(vpci))) {
 			ret = -ENODEV;
 		}
 	}
@@ -576,7 +575,7 @@ static int32_t vpci_write_cfg(struct acrn_vpci *vpci, union pci_bdf bdf,
 	if (vdev != NULL) {
 		ret = vdev->vdev_ops->write_vdev_cfg(vdev, offset, bytes, val);
 	} else {
-		if (!is_postlaunched_vm(vpci->vm)) {
+		if (!is_postlaunched_vm(vpci2vm(vpci))) {
 			pr_acrnlog("%s %x:%x.%x not found! off: 0x%x, val: 0x%x\n", __func__,
 				bdf.bits.b, bdf.bits.d, bdf.bits.f, offset, val);
 		} else {
@@ -642,7 +641,7 @@ static void vpci_init_vdevs(struct acrn_vm *vm)
 {
 	uint32_t idx;
 	struct acrn_vpci *vpci = &(vm->vpci);
-	const struct acrn_vm_config *vm_config = get_vm_config(vpci->vm->vm_id);
+	const struct acrn_vm_config *vm_config = get_vm_config(vpci2vm(vpci)->vm_id);
 
 	for (idx = 0U; idx < vm_config->pci_dev_num; idx++) {
 		(void)vpci_init_vdev(vpci, &vm_config->pci_devs[idx], NULL);
@@ -30,8 +30,14 @@
 #ifndef VPCI_PRIV_H_
 #define VPCI_PRIV_H_
 
+#include <list.h>
 #include <pci.h>
 
+static inline struct acrn_vm *vpci2vm(const struct acrn_vpci *vpci)
+{
+	return container_of(vpci, struct acrn_vm, vpci);
+}
+
 static inline bool is_quirk_ptdev(const struct pci_vdev *vdev)
 {
 	return ((vdev->flags & QUIRK_PTDEV) != 0U);
@@ -96,7 +96,7 @@ static void create_vf(struct pci_vdev *pf_vdev, union pci_bdf vf_bdf, uint16_t v
 
 	dev_cfg = init_one_dev_config(vf_pdev);
 	if (dev_cfg != NULL) {
-		vf_vdev = vpci_init_vdev(&pf_vdev->vpci->vm->vpci, dev_cfg, pf_vdev);
+		vf_vdev = vpci_init_vdev(&vpci2vm(pf_vdev->vpci)->vpci, dev_cfg, pf_vdev);
 	}
 }
 
@@ -202,7 +202,7 @@ static void enable_vfs(struct pci_vdev *pf_vdev)
 		 * The VF maybe have already existed but it is a zombie instance that vf_vdev->vpci
 		 * is NULL, in this case, we need to make the vf_vdev available again in here.
 		 */
-		vf_vdev = pci_find_vdev(&pf_vdev->vpci->vm->vpci, vf_bdf);
+		vf_vdev = pci_find_vdev(&vpci2vm(pf_vdev->vpci)->vpci, vf_bdf);
 		if (vf_vdev == NULL) {
 			create_vf(pf_vdev, vf_bdf, idx);
 		} else {
@@ -248,7 +248,7 @@ static void disable_vfs(struct pci_vdev *pf_vdev)
 
 		bdf.fields.bus = get_vf_bus(pf_vdev, first, stride, idx);
 		bdf.fields.devfun = get_vf_devfun(pf_vdev, first, stride, idx);
-		vf_vdev = pci_find_vdev(&pf_vdev->vpci->vm->vpci, bdf);
+		vf_vdev = pci_find_vdev(&vpci2vm(pf_vdev->vpci)->vpci, bdf);
 		if ((vf_vdev != NULL) && (!is_zombie_vf(vf_vdev))) {
 			/* set disabled VF as zombie vdev instance */
 			vf_vdev->vdev_ops->deinit_vdev(vf_vdev);
@@ -139,13 +139,14 @@ union pci_cfg_addr_reg {
 
 struct acrn_vpci {
 	spinlock_t lock;
-	struct acrn_vm *vm;
 	union pci_cfg_addr_reg addr;
 	uint64_t pci_mmcfg_base;
 	uint32_t pci_vdev_cnt;
 	struct pci_vdev pci_vdevs[CONFIG_MAX_PCI_DEV_NUM];
 };
 
+struct acrn_vm;
+
 extern const struct pci_vdev_ops vhostbridge_ops;
 extern const struct pci_vdev_ops vpci_bridge_ops;
 void vpci_init(struct acrn_vm *vm);