HV: rename the term vm0 to sos_vm

Under sharing mode, VM0 is identical to the SOS VM, but coupling the
SOS VM to VM0 is not friendly to partition mode.

This patch is a pure terminology change from vm0 to sos_vm; it does not
change any code logic or semantics.

Tracked-On: #2291

Signed-off-by: Victor Sun <victor.sun@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Victor Sun authored on 2019-01-21 14:48:31 +08:00; committed by Eddie Dong
parent 55e5ed2e1a  commit 49e6deaf26
39 changed files with 174 additions and 174 deletions
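
The rename is mechanical: every vm0/VM0 identifier becomes sos_vm/SOS_VM while the logic stays the same. For instance, the is_vm0() predicate used throughout the hunks below becomes is_sos_vm(). A minimal sketch of the renamed check, assuming the SOS VM still occupies vm_id 0 under sharing mode (the header change itself is not among the hunks shown here):

/* hedged sketch -- the real definition lives in the VM headers, not in these hunks */
static inline bool is_sos_vm(const struct acrn_vm *vm)
{
        return (vm->vm_id == 0U);       /* same test the old is_vm0() performed */
}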

@@ -279,11 +279,11 @@ config PLATFORM_RAM_SIZE
(MMIO not included).
config SOS_RAM_SIZE
hex "Size of the vm0 (SOS) RAM"
hex "Size of the Service OS (SOS) RAM"
default 0x200000000 if PLATFORM_SBL
default 0x400000000 if PLATFORM_UEFI
help
A 64-bit integer indicating the size of the vm0 (SOS) RAM (MMIO not
A 64-bit integer indicating the size of the Service OS RAM (MMIO not
included).
config UOS_RAM_SIZE

@@ -182,7 +182,7 @@ ptirq_build_physical_rte(struct acrn_vm *vm, struct ptirq_remapping_info *entry)
/* add msix entry for a vm, based on msi id (phys_bdf+msix_index)
* - if the entry not be added by any vm, allocate it
* - if the entry already be added by vm0, then change the owner to current vm
* - if the entry already be added by sos_vm, then change the owner to current vm
* - if the entry already be added by other vm, return NULL
*/
static struct ptirq_remapping_info *add_msix_remapping(struct acrn_vm *vm,
@@ -209,7 +209,7 @@ static struct ptirq_remapping_info *add_msix_remapping(struct acrn_vm *vm,
}
}
} else if (entry->vm != vm) {
if (is_vm0(entry->vm)) {
if (is_sos_vm(entry->vm)) {
entry->vm = vm;
entry->virt_sid.msi_id.bdf = virt_bdf;
} else {
@@ -257,7 +257,7 @@ remove_msix_remapping(const struct acrn_vm *vm, uint16_t virt_bdf, uint32_t entr
/* add intx entry for a vm, based on intx id (phys_pin)
* - if the entry not be added by any vm, allocate it
* - if the entry already be added by vm0, then change the owner to current vm
* - if the entry already be added by sos_vm, then change the owner to current vm
* - if the entry already be added by other vm, return NULL
*/
static struct ptirq_remapping_info *add_intx_remapping(struct acrn_vm *vm, uint32_t virt_pin,
@@ -292,7 +292,7 @@ static struct ptirq_remapping_info *add_intx_remapping(struct acrn_vm *vm, uint3
pr_err("INTX re-add vpin %d", virt_pin);
}
} else if (entry->vm != vm) {
if (is_vm0(entry->vm)) {
if (is_sos_vm(entry->vm)) {
entry->vm = vm;
entry->virt_sid.value = virt_sid.value;
} else {
@@ -511,14 +511,14 @@ int32_t ptirq_msix_remap(struct acrn_vm *vm, uint16_t virt_bdf,
* Device Model should pre-hold the mapping entries by calling
* ptirq_add_msix_remapping for UOS.
*
* For SOS(vm0), it adds the mapping entries at runtime, if the
* For SOS(sos_vm), it adds the mapping entries at runtime, if the
* entry already be held by others, return error.
*/
spinlock_obtain(&ptdev_lock);
entry = ptirq_lookup_entry_by_sid(PTDEV_INTR_MSI, &virt_sid, vm);
if (entry == NULL) {
/* VM0 we add mapping dynamically */
if (is_vm0(vm)) {
/* SOS_VM we add mapping dynamically */
if (is_sos_vm(vm)) {
entry = add_msix_remapping(vm, virt_bdf, virt_bdf, entry_nr);
if (entry == NULL) {
pr_err("dev-assign: msi entry exist in others");
@@ -599,12 +599,12 @@ int32_t ptirq_intx_pin_remap(struct acrn_vm *vm, uint32_t virt_pin, uint32_t vpi
* Device Model should pre-hold the mapping entries by calling
* ptirq_add_intx_remapping for UOS.
*
* For SOS(vm0), it adds the mapping entries at runtime, if the
* For SOS(sos_vm), it adds the mapping entries at runtime, if the
* entry already be held by others, return error.
*/
/* no remap for hypervisor owned intx */
if (is_vm0(vm) && hv_used_dbg_intx(virt_sid.intx_id.pin)) {
if (is_sos_vm(vm) && hv_used_dbg_intx(virt_sid.intx_id.pin)) {
status = -ENODEV;
}
@@ -615,9 +615,9 @@ int32_t ptirq_intx_pin_remap(struct acrn_vm *vm, uint32_t virt_pin, uint32_t vpi
spinlock_obtain(&ptdev_lock);
entry = ptirq_lookup_entry_by_vpin(vm, virt_pin, pic_pin);
if (entry == NULL) {
if (is_vm0(vm)) {
if (is_sos_vm(vm)) {
/* for vm0, there is chance of vpin source switch
/* for sos_vm, there is chance of vpin source switch
* between vPIC & vIOAPIC for one legacy phys_pin.
*
* here checks if there is already mapping entry from
@@ -680,7 +680,7 @@ int32_t ptirq_intx_pin_remap(struct acrn_vm *vm, uint32_t virt_pin, uint32_t vpi
}
/* @pre vm != NULL
* except vm0, Device Model should call this function to pre-hold ptdev intx
* except sos_vm, Device Model should call this function to pre-hold ptdev intx
* entries:
* - the entry is identified by phys_pin:
* one entry vs. one phys_pin
@@ -708,7 +708,7 @@ void ptirq_remove_intx_remapping(struct acrn_vm *vm, uint32_t virt_pin, bool pic
spinlock_release(&ptdev_lock);
}
/* except vm0, Device Model should call this function to pre-hold ptdev msi
/* except sos_vm, Device Model should call this function to pre-hold ptdev msi
* entries:
* - the entry is identified by phys_bdf:msi_idx:
* one entry vs. one phys_bdf:msi_idx
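
Taken together, the msix and intx comments above describe one ownership rule for a passthrough entry: a free entry is allocated to the caller, an entry held by sos_vm is handed over to the requesting UOS, and an entry held by any other VM is refused. A purely illustrative restatement of that rule (the helper name and the alloc call are hypothetical; only the is_sos_vm() check mirrors the real code):

/* illustrative only -- condenses the rule shared by add_msix_remapping() and add_intx_remapping() */
static struct ptirq_remapping_info *claim_remapping_entry(struct acrn_vm *vm,
                struct ptirq_remapping_info *entry)
{
        if (entry == NULL) {
                entry = alloc_entry_for(vm);    /* hypothetical: not yet added by any VM */
        } else if (entry->vm != vm) {
                if (is_sos_vm(entry->vm)) {
                        entry->vm = vm;         /* sos_vm hands the entry to the UOS */
                } else {
                        entry = NULL;           /* already owned by another UOS: refuse */
                }
        }
        return entry;
}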

@@ -11,7 +11,7 @@
/*
* e820.c contains the related e820 operations; like HV to get memory info for its MMU setup;
* and hide HV memory from VM0...
* and hide HV memory from SOS_VM...
*/
static uint32_t e820_entries_count;
@@ -51,8 +51,8 @@ static void obtain_e820_mem_info(void)
}
}
/* before boot vm0(service OS), call it to hide the HV RAM entry in e820 table from vm0 */
void rebuild_vm0_e820(void)
/* before boot sos_vm(service OS), call it to hide the HV RAM entry in e820 table from sos_vm */
void rebuild_sos_vm_e820(void)
{
uint32_t i;
uint64_t entry_start;
@@ -116,7 +116,7 @@ void rebuild_vm0_e820(void)
e820_mem.total_mem_size -= CONFIG_HV_RAM_SIZE;
}
/* get some RAM below 1MB in e820 entries, hide it from vm0, return its start address */
/* get some RAM below 1MB in e820 entries, hide it from sos_vm, return its start address */
uint64_t e820_alloc_low_memory(uint32_t size_arg)
{
uint32_t i;
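
Behind the comment changes above, rebuild_sos_vm_e820() has to carve the hypervisor's own RAM out of whatever e820 RAM entry contains it before the SOS boots. A hedged sketch of that carve-out step (the helper name is illustrative, baseaddr/length are the assumed e820_entry fields, and a full version would also split an entry that straddles the hypervisor range into two):

/* hedged sketch of the "hide HV RAM from sos_vm" idea; not the verbatim hypervisor code */
static void hide_range_from_entry(struct e820_entry *entry, uint64_t hv_start, uint64_t hv_end)
{
        uint64_t entry_start = entry->baseaddr;
        uint64_t entry_end = entry->baseaddr + entry->length;

        if ((hv_start >= entry_end) || (hv_end <= entry_start)) {
                return;                                 /* no overlap, nothing to hide */
        }
        if (entry_start < hv_start) {
                entry->length = hv_start - entry_start; /* keep only the part below the HV */
        } else if (entry_end > hv_end) {
                entry->baseaddr = hv_end;               /* keep only the part above the HV */
                entry->length = entry_end - hv_end;
        } else {
                entry->length = 0UL;                    /* entry is fully covered by the HV */
        }
}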

@@ -67,7 +67,7 @@ uint64_t gpa2hpa(struct acrn_vm *vm, uint64_t gpa)
/**
* @pre: the gpa and hpa are identical mapping in SOS.
*/
uint64_t vm0_hpa2gpa(uint64_t hpa)
uint64_t sos_vm_hpa2gpa(uint64_t hpa)
{
return hpa;
}

@@ -448,9 +448,9 @@ int32_t copy_to_gva(struct acrn_vcpu *vcpu, void *h_ptr, uint64_t gva,
* @retval 0 on success
*
* @pre vm != NULL
* @pre is_vm0(vm) == true
* @pre is_sos_vm(vm) == true
*/
void prepare_vm0_memmap(struct acrn_vm *vm)
void prepare_sos_vm_memmap(struct acrn_vm *vm)
{
uint32_t i;
uint64_t attr_uc = (EPT_RWX | EPT_UNCACHED);
@@ -462,11 +462,11 @@ void prepare_vm0_memmap(struct acrn_vm *vm)
const struct e820_entry *p_e820 = get_e820_entry();
const struct e820_mem_params *p_e820_mem_info = get_e820_mem_info();
dev_dbg(ACRN_DBG_GUEST, "vm0: bottom memory - 0x%llx, top memory - 0x%llx\n",
dev_dbg(ACRN_DBG_GUEST, "sos_vm: bottom memory - 0x%llx, top memory - 0x%llx\n",
p_e820_mem_info->mem_bottom, p_e820_mem_info->mem_top);
if (p_e820_mem_info->mem_top > EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE)) {
panic("Please configure VM0_ADDRESS_SPACE correctly!\n");
panic("Please configure SOS_VM_ADDRESS_SPACE correctly!\n");
}
/* create real ept map for all ranges with UC */
@@ -481,7 +481,7 @@ void prepare_vm0_memmap(struct acrn_vm *vm)
}
}
dev_dbg(ACRN_DBG_GUEST, "VM0 e820 layout:\n");
dev_dbg(ACRN_DBG_GUEST, "SOS_VM e820 layout:\n");
for (i = 0U; i < entries_count; i++) {
entry = p_e820 + i;
dev_dbg(ACRN_DBG_GUEST, "e820 table: %d type: 0x%x", i, entry->type);
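
The identity-mapping precondition noted for sos_vm_hpa2gpa() and the "all ranges with UC" comment above suggest the overall strategy: identity-map the whole SOS address range as uncached first, then walk the e820 and upgrade the RAM entries to write-back. A hedged sketch under that reading (the wrapper name is hypothetical; the ept_mr_add()/ept_mr_modify() argument order follows common ACRN usage and may differ in detail):

/* hedged sketch -- not the verbatim body of prepare_sos_vm_memmap() */
static void map_sos_vm_memory(struct acrn_vm *vm, uint64_t *pml4_page,
                const struct e820_entry *p_e820, uint32_t entries_count,
                const struct e820_mem_params *info)
{
        uint64_t attr_uc = (EPT_RWX | EPT_UNCACHED);
        uint32_t i;

        /* identity map (gpa == hpa for the SOS) of the whole range, uncached */
        ept_mr_add(vm, pml4_page, info->mem_bottom, info->mem_bottom,
                        info->mem_top - info->mem_bottom, attr_uc);

        /* then mark the e820 RAM entries as write-back */
        for (i = 0U; i < entries_count; i++) {
                if (p_e820[i].type == E820_TYPE_RAM) {
                        ept_mr_modify(vm, pml4_page, p_e820[i].baseaddr,
                                        p_e820[i].length, EPT_WB, EPT_MT_MASK);
                }
        }
}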

@@ -10,7 +10,7 @@ int32_t validate_pstate(const struct acrn_vm *vm, uint64_t perf_ctl)
{
int32_t ret = -1;
if (is_vm0(vm)) {
if (is_sos_vm(vm)) {
ret = 0;
} else {
uint8_t i;
@@ -132,7 +132,7 @@ static inline void enter_s3(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t
guest_wakeup_vec32 = *(vm->pm.sx_state_data->wake_vector_32);
clac();
pause_vm(vm); /* pause vm0 before suspend system */
pause_vm(vm); /* pause sos_vm before suspend system */
host_enter_s3(vm->pm.sx_state_data, pm1a_cnt_val, pm1b_cnt_val);
resume_vm_from_s3(vm, guest_wakeup_vec32); /* jump back to vm */
}

@@ -308,7 +308,7 @@ void set_ap_entry(struct acrn_vcpu *vcpu, uint64_t entry)
* vcpu_id/pcpu_id mapping table:
*
* if
* VM0_CPUS[2] = {0, 2} , VM1_CPUS[2] = {3, 1};
* SOS_VM_CPUS[2] = {0, 2} , VM1_CPUS[2] = {3, 1};
* then
* for physical CPU 0 : vcpu->pcpu_id = 0, vcpu->vcpu_id = 0, vmid = 0;
* for physical CPU 2 : vcpu->pcpu_id = 2, vcpu->vcpu_id = 1, vmid = 0;

@@ -365,7 +365,7 @@ void guest_cpuid(struct acrn_vcpu *vcpu, uint32_t *eax, uint32_t *ebx, uint32_t
#ifdef CONFIG_PARTITION_MODE
cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
#else
if (is_vm0(vcpu->vm)) {
if (is_sos_vm(vcpu->vm)) {
cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
} else {
*ecx = subleaf & 0xFFU;

@@ -174,7 +174,7 @@ vlapic_build_id(const struct acrn_vlapic *vlapic)
*/
vlapic_id = per_cpu(lapic_id, vcpu->pcpu_id);
#else
if (is_vm0(vcpu->vm)) {
if (is_sos_vm(vcpu->vm)) {
/* Get APIC ID sequence format from cpu_storage */
vlapic_id = per_cpu(lapic_id, vcpu->vcpu_id);
} else {
@@ -2183,7 +2183,7 @@ int32_t vlapic_create(struct acrn_vcpu *vcpu)
uint64_t *pml4_page =
(uint64_t *)vcpu->vm->arch_vm.nworld_eptp;
/* only need unmap it from SOS as UOS never mapped it */
if (is_vm0(vcpu->vm)) {
if (is_sos_vm(vcpu->vm)) {
ept_mr_del(vcpu->vm, pml4_page,
DEFAULT_APIC_BASE, PAGE_SIZE);
}

@@ -49,7 +49,7 @@ static inline bool is_vm_valid(uint16_t vm_id)
*/
static void setup_io_bitmap(struct acrn_vm *vm)
{
if (is_vm0(vm)) {
if (is_sos_vm(vm)) {
(void)memset(vm->arch_vm.io_bitmap, 0x00U, PAGE_SIZE * 2U);
} else {
/* block all IO port access from Guest */
@@ -112,10 +112,10 @@ int32_t create_vm(struct acrn_vm_config *vm_config, struct acrn_vm **rtn_vm)
/* Only for SOS: Configure VM software information */
/* For UOS: This VM software information is configure in DM */
if (is_vm0(vm)) {
if (is_sos_vm(vm)) {
vm->snoopy_mem = false;
rebuild_vm0_e820();
prepare_vm0_memmap(vm);
rebuild_sos_vm_e820();
prepare_sos_vm_memmap(vm);
#ifndef CONFIG_EFI_STUB
status = init_vm_boot_info(vm);
@@ -123,7 +123,7 @@ int32_t create_vm(struct acrn_vm_config *vm_config, struct acrn_vm **rtn_vm)
status = efi_boot_init();
#endif
if (status == 0) {
init_iommu_vm0_domain(vm);
init_iommu_sos_vm_domain(vm);
} else {
need_cleanup = true;
}
@@ -163,7 +163,7 @@ int32_t create_vm(struct acrn_vm_config *vm_config, struct acrn_vm **rtn_vm)
vm_setup_cpu_state(vm);
if (is_vm0(vm)) {
if (is_sos_vm(vm)) {
/* Load pm S state data */
if (vm_load_pm_s_state(vm) == 0) {
register_pm1ab_handler(vm);
@@ -290,7 +290,7 @@ int32_t reset_vm(struct acrn_vm *vm)
reset_vcpu(vcpu);
}
if (is_vm0(vm)) {
if (is_sos_vm(vm)) {
(void )vm_sw_loader(vm);
}
@@ -411,22 +411,22 @@ int32_t prepare_vm(uint16_t pcpu_id)
#else
/* Create vm/vcpu for vm0 */
static int32_t prepare_vm0(void)
/* Create vm/vcpu for sos_vm */
static int32_t prepare_sos_vm(void)
{
int32_t err;
uint16_t i;
struct acrn_vm *vm = NULL;
struct acrn_vm_config vm0_config;
struct acrn_vm_config sos_vm_config;
(void)memset((void *)&vm0_config, 0U, sizeof(vm0_config));
vm0_config.vm_hw_num_cores = get_pcpu_nums();
(void)memset((void *)&sos_vm_config, 0U, sizeof(sos_vm_config));
sos_vm_config.vm_hw_num_cores = get_pcpu_nums();
err = create_vm(&vm0_config, &vm);
err = create_vm(&sos_vm_config, &vm);
if (err == 0) {
/* Allocate all cpus to vm0 at the beginning */
for (i = 0U; i < vm0_config.vm_hw_num_cores; i++) {
/* Allocate all cpus to sos_vm at the beginning */
for (i = 0U; i < sos_vm_config.vm_hw_num_cores; i++) {
err = prepare_vcpu(vm, i);
if (err != 0) {
break;
@@ -439,14 +439,14 @@ static int32_t prepare_vm0(void)
vm_sw_loader = general_sw_loader;
}
if (is_vm0(vm)) {
if (is_sos_vm(vm)) {
(void)vm_sw_loader(vm);
}
/* start vm0 BSP automatically */
/* start sos_vm BSP automatically */
start_vm(vm);
pr_acrnlog("Start VM0");
pr_acrnlog("Start SOS_VM");
}
}
@@ -457,9 +457,9 @@ int32_t prepare_vm(uint16_t pcpu_id)
{
int32_t err = 0;
/* prepare vm0 if pcpu_id is BOOT_CPU_ID */
/* prepare sos_vm if pcpu_id is BOOT_CPU_ID */
if (pcpu_id == BOOT_CPU_ID) {
err = prepare_vm0();
err = prepare_sos_vm();
}
return err;

@@ -195,10 +195,10 @@ int32_t vmcall_vmexit_handler(struct acrn_vcpu *vcpu)
if (!is_hypercall_from_ring0()) {
pr_err("hypercall is only allowed from RING-0!\n");
ret = -EACCES;
} else if (!is_vm0(vm) && (hypcall_id != HC_WORLD_SWITCH) &&
} else if (!is_sos_vm(vm) && (hypcall_id != HC_WORLD_SWITCH) &&
(hypcall_id != HC_INITIALIZE_TRUSTY) &&
(hypcall_id != HC_SAVE_RESTORE_SWORLD_CTX)) {
pr_err("hypercall %d is only allowed from VM0!\n", hypcall_id);
pr_err("hypercall %d is only allowed from SOS_VM!\n", hypcall_id);
ret = -EACCES;
} else {
/* Dispatch the hypercall handler */

@@ -553,7 +553,7 @@ int32_t wrmsr_vmexit_handler(struct acrn_vcpu *vcpu)
case MSR_IA32_BIOS_UPDT_TRIG:
{
/* We only allow SOS to do uCode update */
if (is_vm0(vcpu->vm)) {
if (is_sos_vm(vcpu->vm)) {
acrn_update_ucode(vcpu, v);
}
break;

@@ -100,14 +100,14 @@ void init_vmtrr(struct acrn_vcpu *vcpu)
vmtrr->def_type.bits.fixed_enable = 1U;
vmtrr->def_type.bits.type = MTRR_MEM_TYPE_UC;
if (is_vm0(vcpu->vm)) {
if (is_sos_vm(vcpu->vm)) {
cap.value = msr_read(MSR_IA32_MTRR_CAP);
}
for (i = 0U; i < FIXED_RANGE_MTRR_NUM; i++) {
if (cap.bits.fix != 0U) {
/*
* The system firmware runs in VMX non-root mode on VM0.
* The system firmware runs in VMX non-root mode on SOS_VM.
* In some cases, the firmware needs particular mem type
* at certain mmeory locations (e.g. UC for some
* hardware registers), so we need to configure EPT
@@ -116,7 +116,7 @@ void init_vmtrr(struct acrn_vcpu *vcpu)
vmtrr->fixed_range[i].value = msr_read(fixed_mtrr_map[i].msr);
} else {
/*
* For non-vm0 EPT, all memory is setup with WB type in
* For non-sos_vm EPT, all memory is setup with WB type in
* EPT, so we setup fixed range MTRRs accordingly.
*/
vmtrr->fixed_range[i].value = MTRR_FIXED_RANGE_ALL_WB;

@@ -536,7 +536,7 @@ static void deny_guest_pio_access(struct acrn_vm *vm, uint16_t port_address,
void register_pio_emulation_handler(struct acrn_vm *vm, uint32_t pio_idx,
const struct vm_io_range *range, io_read_fn_t io_read_fn_ptr, io_write_fn_t io_write_fn_ptr)
{
if (is_vm0(vm)) {
if (is_sos_vm(vm)) {
deny_guest_pio_access(vm, range->base, range->len);
}
vm->arch_vm.emul_pio[pio_idx].port_start = range->base;
@@ -588,7 +588,7 @@ int32_t register_mmio_emulation_handler(struct acrn_vm *vm,
* should unmap it. But UOS will not, so we shouldn't
* need to unmap it.
*/
if (is_vm0(vm)) {
if (is_sos_vm(vm)) {
ept_mr_del(vm, (uint64_t *)vm->arch_vm.nworld_eptp, start, end - start);
}

@@ -81,7 +81,7 @@ static int32_t request_notification_irq(irq_action_t func, void *data)
*/
void setup_notification(void)
{
/* support IPI notification, VM0 will register all CPU */
/* support IPI notification, SOS_VM will register all CPU */
if (request_notification_irq(kick_notification, NULL) < 0) {
pr_err("Failed to setup notification");
}

@@ -58,10 +58,10 @@ const struct memory_ops ppt_mem_ops = {
.get_pd_page = ppt_get_pd_page,
};
static struct page vm0_pml4_pages[PML4_PAGE_NUM(EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE))];
static struct page vm0_pdpt_pages[PDPT_PAGE_NUM(EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE))];
static struct page vm0_pd_pages[PD_PAGE_NUM(EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE))];
static struct page vm0_pt_pages[PT_PAGE_NUM(EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE))];
static struct page sos_vm_pml4_pages[PML4_PAGE_NUM(EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE))];
static struct page sos_vm_pdpt_pages[PDPT_PAGE_NUM(EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE))];
static struct page sos_vm_pd_pages[PD_PAGE_NUM(EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE))];
static struct page sos_vm_pt_pages[PT_PAGE_NUM(EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE))];
/* uos_nworld_pml4_pages[i] is ...... of UOS i (whose vm_id = i +1) */
static struct page uos_nworld_pml4_pages[CONFIG_MAX_VM_NUM - 1U][PML4_PAGE_NUM(EPT_ADDRESS_SPACE(CONFIG_UOS_RAM_SIZE))];
@@ -78,10 +78,10 @@ static union pgtable_pages_info ept_pages_info[CONFIG_MAX_VM_NUM] = {
{
.ept = {
.top_address_space = EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE),
.nworld_pml4_base = vm0_pml4_pages,
.nworld_pdpt_base = vm0_pdpt_pages,
.nworld_pd_base = vm0_pd_pages,
.nworld_pt_base = vm0_pt_pages,
.nworld_pml4_base = sos_vm_pml4_pages,
.nworld_pdpt_base = sos_vm_pdpt_pages,
.nworld_pd_base = sos_vm_pd_pages,
.nworld_pt_base = sos_vm_pt_pages,
},
},
};
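
A rough sizing check behind these static arrays (hedged, since the *_PAGE_NUM() and EPT_ADDRESS_SPACE() macros are not part of this diff): with 4-level EPT, one page-table page maps 512 x 4 KiB = 2 MiB, one page-directory page maps 1 GiB, and one PDPT page maps 512 GiB. For the UEFI default CONFIG_SOS_RAM_SIZE of 0x400000000 (16 GiB, see the Kconfig hunk above) plus whatever MMIO hole EPT_ADDRESS_SPACE() adds, that comes to roughly 16 GiB / 2 MiB = 8192 PT pages, 16 PD pages, and a single PDPT page and PML4 page per normal-world address space.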

@@ -156,7 +156,7 @@ bool iommu_snoop_supported(const struct acrn_vm *vm)
static struct dmar_drhd_rt dmar_drhd_units[CONFIG_MAX_IOMMU_NUM];
static bool iommu_page_walk_coherent = true;
static struct iommu_domain *vm0_domain;
static struct iommu_domain *sos_vm_domain;
/* Domain id 0 is reserved in some cases per VT-d */
#define MAX_DOMAIN_NUM (CONFIG_MAX_VM_NUM + 1)
@@ -1061,8 +1061,8 @@ int32_t assign_iommu_device(struct iommu_domain *domain, uint8_t bus, uint8_t de
/* TODO: check if the device assigned */
if (vm0_domain != NULL) {
status = remove_iommu_device(vm0_domain, 0U, bus, devfun);
if (sos_vm_domain != NULL) {
status = remove_iommu_device(sos_vm_domain, 0U, bus, devfun);
}
if (status == 0) {
@@ -1079,8 +1079,8 @@ int32_t unassign_iommu_device(const struct iommu_domain *domain, uint8_t bus, ui
/* TODO: check if the device assigned */
status = remove_iommu_device(domain, 0U, bus, devfun);
if ((status == 0) && (vm0_domain != NULL)) {
status = add_iommu_device(vm0_domain, 0U, bus, devfun);
if ((status == 0) && (sos_vm_domain != NULL)) {
status = add_iommu_device(sos_vm_domain, 0U, bus, devfun);
}
return status;
@@ -1121,22 +1121,22 @@ int32_t init_iommu(void)
return ret;
}
void init_iommu_vm0_domain(struct acrn_vm *vm0)
void init_iommu_sos_vm_domain(struct acrn_vm *sos_vm)
{
uint16_t bus;
uint16_t devfun;
vm0->iommu = create_iommu_domain(vm0->vm_id, hva2hpa(vm0->arch_vm.nworld_eptp), 48U);
sos_vm->iommu = create_iommu_domain(sos_vm->vm_id, hva2hpa(sos_vm->arch_vm.nworld_eptp), 48U);
vm0_domain = (struct iommu_domain *) vm0->iommu;
if (vm0_domain == NULL) {
pr_err("vm0 domain is NULL\n");
sos_vm_domain = (struct iommu_domain *) sos_vm->iommu;
if (sos_vm_domain == NULL) {
pr_err("sos_vm domain is NULL\n");
} else {
for (bus = 0U; bus < CONFIG_IOMMU_BUS_NUM; bus++) {
for (devfun = 0U; devfun <= 255U; devfun++) {
if (add_iommu_device(vm0_domain, 0U, (uint8_t)bus, (uint8_t)devfun) != 0) {
/* the panic only occurs before VM0 starts running in sharing mode */
panic("Failed to add %x:%x.%x to VM0 domain", bus, pci_slot(devfun), pci_func(devfun));
if (add_iommu_device(sos_vm_domain, 0U, (uint8_t)bus, (uint8_t)devfun) != 0) {
/* the panic only occurs before SOS_VM starts running in sharing mode */
panic("Failed to add %x:%x.%x to SOS_VM domain", bus, pci_slot(devfun), pci_func(devfun));
}
}
}
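
The assign/unassign hunks above describe a simple hand-off: every PCI function starts out in the SOS domain (populated by the loop just shown), is removed from it when passed through to a UOS, and is added back on unassign. A consolidated, purely illustrative sketch (the wrapper name is hypothetical; the two helper calls and the 0U segment argument are the ones visible in the hunks):

/* illustrative only -- mirrors the assign_iommu_device() path shown above */
static int32_t move_device_to_uos_domain(struct iommu_domain *uos_domain,
                uint8_t bus, uint8_t devfun)
{
        int32_t status = 0;

        if (sos_vm_domain != NULL) {
                /* the SOS domain gives the device up first ... */
                status = remove_iommu_device(sos_vm_domain, 0U, bus, devfun);
        }
        if (status == 0) {
                /* ... then the UOS domain takes ownership */
                status = add_iommu_device(uos_domain, 0U, bus, devfun);
        }
        return status;
}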