hv: Remove need for init_fallback_iommu_domain and fallback_iommu_domain

In the presence of SOS, ACRN uses fallback_iommu_domain which is the same
used by SOS, to assign domain to devices during ACRN init. Also it uses
fallback_iommu_domain when DM requests ACRN to remove device from UOS domain.
This patch changes the design of assign/remove_iommu_device to avoid the
concept of fallback_iommu_domain and its setup. This way ACRN can treat
bring-up of pre-launched VMs uniformly w.r.t. IOMMU domain creation.

Tracked-On: #2965
Signed-off-by: Sainath Grandhi <sainath.grandhi@intel.com>
Reviewed-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Sainath Grandhi 2019-04-13 11:57:25 -07:00 committed by ACRN System Integration
parent 948d58fb9c
commit 824caf8ce0
7 changed files with 40 additions and 116 deletions

View File

@ -306,9 +306,6 @@ deinitialization:
.. doxygenfunction:: init_iommu
:project: Project ACRN
.. doxygenfunction:: init_fallback_iommu_domain
:project: Project ACRN
runtime
=======
@ -326,9 +323,5 @@ The following API are provided during runtime:
.. doxygenfunction:: resume_iommu
:project: Project ACRN
.. doxygenfunction:: assign_pt_device
.. doxygenfunction:: move_pt_device
:project: Project ACRN
.. doxygenfunction:: unassign_pt_device
:project: Project ACRN

View File

@ -341,9 +341,7 @@ int32_t create_vm(uint16_t vm_id, struct acrn_vm_config *vm_config, struct acrn_
prepare_sos_vm_memmap(vm);
status = firmware_init_vm_boot_info(vm);
if (status == 0) {
init_fallback_iommu_domain(vm->iommu, vm->vm_id, vm->arch_vm.nworld_eptp);
} else {
if (status != 0) {
need_cleanup = true;
}

View File

@ -189,7 +189,6 @@ bool iommu_snoop_supported(const struct iommu_domain *iommu)
static struct dmar_drhd_rt dmar_drhd_units[CONFIG_MAX_IOMMU_NUM];
static bool iommu_page_walk_coherent = true;
static struct iommu_domain *fallback_iommu_domain;
static uint32_t qi_status = 0U;
static struct dmar_info *platform_dmar_info = NULL;
@ -1158,7 +1157,6 @@ static int32_t remove_iommu_device(const struct iommu_domain *domain, uint16_t s
ret = -EINVAL;
} else if (dmar_unit->drhd->ignore) {
dev_dbg(ACRN_DBG_IOMMU, "device is ignored :0x%x:%x.%x", bus, pci_slot(devfun), pci_func(devfun));
ret = -EINVAL;
} else {
root_table = (struct dmar_entry *)hpa2hva(dmar_unit->root_table_addr);
root_entry = root_table + bus;
@ -1261,7 +1259,11 @@ void destroy_iommu_domain(struct iommu_domain *domain)
(void)memset(domain, 0U, sizeof(*domain));
}
int32_t assign_pt_device(struct iommu_domain *domain, uint8_t bus, uint8_t devfun)
/*
* @pre (from_domain != NULL) || (to_domain != NULL)
*/
int32_t move_pt_device(const struct iommu_domain *from_domain, struct iommu_domain *to_domain, uint8_t bus, uint8_t devfun)
{
int32_t status = 0;
uint16_t bus_local = bus;
@ -1269,32 +1271,12 @@ int32_t assign_pt_device(struct iommu_domain *domain, uint8_t bus, uint8_t devfu
/* TODO: check if the device assigned */
if (bus_local < CONFIG_IOMMU_BUS_NUM) {
if (fallback_iommu_domain != NULL) {
status = remove_iommu_device(fallback_iommu_domain, 0U, bus, devfun);
if (from_domain != NULL) {
status = remove_iommu_device(from_domain, 0U, bus, devfun);
}
if (status == 0) {
status = add_iommu_device(domain, 0U, bus, devfun);
}
} else {
status = -EINVAL;
}
return status;
}
int32_t unassign_pt_device(const struct iommu_domain *domain, uint8_t bus, uint8_t devfun)
{
int32_t status = 0;
uint16_t bus_local = bus;
/* TODO: check if the device assigned */
if (bus_local < CONFIG_IOMMU_BUS_NUM) {
status = remove_iommu_device(domain, 0U, bus, devfun);
if ((status == 0) && (fallback_iommu_domain != NULL)) {
status = add_iommu_device(fallback_iommu_domain, 0U, bus, devfun);
if ((status == 0) && (to_domain != NULL)) {
status = add_iommu_device(to_domain, 0U, bus, devfun);
}
} else {
status = -EINVAL;
@ -1348,28 +1330,6 @@ int32_t init_iommu(void)
return ret;
}
void init_fallback_iommu_domain(struct iommu_domain *iommu_dmn, uint16_t vm_id, void *eptp)
{
uint16_t bus;
uint16_t devfun;
iommu_dmn = create_iommu_domain(vm_id, hva2hpa(eptp), 48U);
fallback_iommu_domain = (struct iommu_domain *) iommu_dmn;
if (fallback_iommu_domain == NULL) {
pr_err("fallback_iommu_domain is NULL\n");
} else {
for (bus = 0U; bus < CONFIG_IOMMU_BUS_NUM; bus++) {
for (devfun = 0U; devfun <= 255U; devfun++) {
if (add_iommu_device(fallback_iommu_domain, 0U, (uint8_t)bus, (uint8_t)devfun) != 0) {
/* the panic only occurs before fallback_iommu_domain starts running in sharing mode */
panic("Failed to add %x:%x.%x to fallback_iommu_domain domain", bus, pci_slot(devfun), pci_func(devfun));
}
}
}
}
}
int32_t dmar_assign_irte(struct intr_source intr_src, union dmar_ir_entry irte, uint16_t index)
{
struct dmar_drhd_rt *dmar_unit;

View File

@ -890,7 +890,7 @@ int32_t hcall_assign_ptdev(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
}
}
if (bdf_valid && iommu_valid) {
ret = assign_pt_device(target_vm->iommu,
ret = move_pt_device(vm->iommu, target_vm->iommu,
(uint8_t)(bdf >> 8U), (uint8_t)(bdf & 0xffU));
}
} else {
@ -934,7 +934,7 @@ int32_t hcall_deassign_ptdev(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
}
if (bdf_valid) {
ret = unassign_pt_device(target_vm->iommu,
ret = move_pt_device(target_vm->iommu, vm->iommu,
(uint8_t)(bdf >> 8U), (uint8_t)(bdf & 0xffU));
}
}

View File

@ -293,23 +293,14 @@ static inline bool is_valid_bar(const struct pci_bar *bar)
* @pre vdev != NULL
* @pre vdev->vpci != NULL
* @pre vdev->vpci->vm != NULL
* @pre vdev->vpci->vm->iommu != NULL
*/
static void assign_vdev_pt_iommu_domain(const struct pci_vdev *vdev)
{
int32_t ret;
struct acrn_vm *vm = vdev->vpci->vm;
/* Create an iommu domain for target VM if not created */
if (vm->iommu == NULL) {
if (vm->arch_vm.nworld_eptp == 0UL) {
vm->arch_vm.nworld_eptp = vm->arch_vm.ept_mem_ops.get_pml4_page(vm->arch_vm.ept_mem_ops.info);
sanitize_pte((uint64_t *)vm->arch_vm.nworld_eptp);
}
vm->iommu = create_iommu_domain(vm->vm_id,
hva2hpa(vm->arch_vm.nworld_eptp), 48U);
}
ret = assign_pt_device(vm->iommu, (uint8_t)vdev->pdev->bdf.bits.b,
ret = move_pt_device(NULL, vm->iommu, (uint8_t)vdev->pdev->bdf.bits.b,
(uint8_t)(vdev->pdev->bdf.value & 0xFFU));
if (ret != 0) {
panic("failed to assign iommu device!");
@ -320,13 +311,14 @@ static void assign_vdev_pt_iommu_domain(const struct pci_vdev *vdev)
* @pre vdev != NULL
* @pre vdev->vpci != NULL
* @pre vdev->vpci->vm != NULL
* @pre vdev->vpci->vm->iommu != NULL
*/
static void remove_vdev_pt_iommu_domain(const struct pci_vdev *vdev)
{
int32_t ret;
struct acrn_vm *vm = vdev->vpci->vm;
ret = unassign_pt_device(vm->iommu, (uint8_t)vdev->pdev->bdf.bits.b,
ret = move_pt_device(vm->iommu, NULL, (uint8_t)vdev->pdev->bdf.bits.b,
(uint8_t)(vdev->pdev->bdf.value & 0xFFU));
if (ret != 0) {
/*
@ -380,8 +372,10 @@ static void partition_mode_pdev_init(struct pci_vdev *vdev, union pci_bdf pbdf)
/**
* @pre vm != NULL
* @pre vm->vpci.pci_vdev_cnt <= CONFIG_MAX_PCI_DEV_NUM
* @pre vm->iommu == NULL
* @pre vm->arch_vm.nworld_eptp != NULL
*/
int32_t partition_mode_vpci_init(const struct acrn_vm *vm)
int32_t partition_mode_vpci_init(struct acrn_vm *vm)
{
struct acrn_vpci *vpci = (struct acrn_vpci *)&(vm->vpci);
struct pci_vdev *vdev;
@ -389,8 +383,10 @@ int32_t partition_mode_vpci_init(const struct acrn_vm *vm)
struct acrn_vm_pci_ptdev_config *ptdev_config;
uint32_t i;
vpci->pci_vdev_cnt = vm_config->pci_ptdev_num;
vm->iommu = create_iommu_domain(vm->vm_id,
hva2hpa(vm->arch_vm.nworld_eptp), 48U);
vpci->pci_vdev_cnt = vm_config->pci_ptdev_num;
for (i = 0U; i < vpci->pci_vdev_cnt; i++) {
vdev = &vpci->pci_vdevs[i];
vdev->vpci = vpci;
@ -555,6 +551,8 @@ static void init_vdev_for_pdev(struct pci_pdev *pdev, const void *vm)
if (has_msix_cap(vdev)) {
vdev_pt_remap_msix_table_bar(vdev);
}
assign_vdev_pt_iommu_domain(vdev);
}
}
@ -562,9 +560,14 @@ static void init_vdev_for_pdev(struct pci_pdev *pdev, const void *vm)
/**
* @pre vm != NULL
* @pre is_sos_vm(vm) == true
* @pre vm->iommu == NULL
* @pre vm->arch_vm.nworld_eptp != NULL
*/
int32_t sharing_mode_vpci_init(const struct acrn_vm *vm)
int32_t sharing_mode_vpci_init(struct acrn_vm *vm)
{
vm->iommu = create_iommu_domain(vm->vm_id,
hva2hpa(vm->arch_vm.nworld_eptp), 48U);
/* Build up vdev array for sos_vm */
pci_pdev_foreach(init_vdev_for_pdev, vm);
@ -584,6 +587,8 @@ void sharing_mode_vpci_deinit(const struct acrn_vm *vm)
for (i = 0U; i < vm->vpci.pci_vdev_cnt; i++) {
vdev = (struct pci_vdev *)&(vm->vpci.pci_vdevs[i]);
remove_vdev_pt_iommu_domain(vdev);
vmsi_deinit(vdev);
vmsix_deinit(vdev);

View File

@ -101,14 +101,14 @@ struct pci_vdev *pci_find_vdev_by_vbdf(const struct acrn_vpci *vpci, union pci_b
struct pci_vdev *pci_find_vdev_by_pbdf(const struct acrn_vpci *vpci, union pci_bdf pbdf);
int32_t partition_mode_vpci_init(const struct acrn_vm *vm);
int32_t partition_mode_vpci_init(struct acrn_vm *vm);
void partition_mode_cfgread(const struct acrn_vpci *vpci, union pci_bdf vbdf,
uint32_t offset, uint32_t bytes, uint32_t *val);
void partition_mode_cfgwrite(const struct acrn_vpci *vpci, union pci_bdf vbdf,
uint32_t offset, uint32_t bytes, uint32_t val);
void partition_mode_vpci_deinit(const struct acrn_vm *vm);
int32_t sharing_mode_vpci_init(const struct acrn_vm *vm);
int32_t sharing_mode_vpci_init(struct acrn_vm *vm);
void sharing_mode_cfgread(struct acrn_vpci *vpci, union pci_bdf bdf,
uint32_t offset, uint32_t bytes, uint32_t *val);
void sharing_mode_cfgwrite(__unused struct acrn_vpci *vpci, union pci_bdf bdf,

View File

@ -541,9 +541,11 @@ struct iommu_domain;
/**
* @brief Move a device specified by bus & devfun from one iommu domain to another.
*
* Remove the device from the fallback iommu domain (if present), and add it to the specific domain.
* Remove the device from the from_domain (if non-NULL), and add it to the to_domain (if non-NULL).
* API silently fails to add/remove devices to/from domains that are under "Ignored" DMAR units.
*
* @param[in] domain iommu domain the device is assigned to
* @param[in] from_domain iommu domain from which the device is removed
* @param[in] to_domain iommu domain to which the device is assigned
* @param[in] bus the 8-bit bus number of the device
* @param[in] devfun the 8-bit device(5-bit):function(3-bit) of the device
*
@ -553,24 +555,7 @@ struct iommu_domain;
* @pre (from_domain != NULL) || (to_domain != NULL)
*
*/
int32_t assign_pt_device(struct iommu_domain *domain, uint8_t bus, uint8_t devfun);
/**
* @brief Unassign a device specified by bus & devfun from a iommu domain .
*
* Remove the device from the specific domain, and then add it to the fallback iommu domain (if present).
*
* @param[in] domain iommu domain the device is assigned to
* @param[in] bus the 8-bit bus number of the device
* @param[in] devfun the 8-bit device(5-bit):function(3-bit) of the device
*
* @retval 0 on success.
* @retval 1 fail to unassign the device
*
* @pre domain != NULL
*
*/
int32_t unassign_pt_device(const struct iommu_domain *domain, uint8_t bus, uint8_t devfun);
int32_t move_pt_device(const struct iommu_domain *from_domain, struct iommu_domain *to_domain, uint8_t bus, uint8_t devfun);
/**
* @brief Create a iommu domain for a VM specified by vm_id.
@ -649,23 +634,6 @@ void resume_iommu(void);
*/
int32_t init_iommu(void);
/**
* @brief Init fallback iommu domain of iommu.
*
* Create fallback iommu domain using the Normal World's EPT table of fallback iommu as address translation table.
* All PCI devices are added to the fallback iommu domain when creating it.
*
* @param[in] iommu_dmn pointer to fallback iommu domain
* @param[in] vm_id ID of the VM for which iommu_domain needs to be created
* @param[in] eptp EPT hierarchy table
*
* @pre iommu shall point to fallback iommu domain
*
* @remark to reduce boot time & memory cost, a config IOMMU_INIT_BUS_LIMIT, which limit the bus number.
*
*/
void init_fallback_iommu_domain(struct iommu_domain *iommu_dmn, uint16_t vm_id, void *eptp);
/**
* @brief check the iommu if support cache snoop.
*