mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2025-06-22)
acrn-config: generate SOS ivshmem device information

Support enabling ivshmem for the SOS. Insert the ivshmem device
information if it is enabled:
1. get the ivshmem vbar bases:
   - vbar[0] has size 0x100
   - vbar[2] has the specified size in MB
2. get the vbdf for the ivshmem device

Tracked-On: #5426
Signed-off-by: Yang,Yu-chu <yu-chu,yang>

parent 0f16746c1e
commit 2290396ef5
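For illustration: with a hypothetical region entry such as "shm_region_0, 2" (name, then size in MB; the exact scenario-XML layout is an assumption, not shown in this commit), the board-config hunk below would emit one vbar macro pair. A minimal sketch of that output, with made-up base addresses:

    # Sketch only: replays the two print calls from the first hunk with
    # placeholder values to show the generated vm_configurations fragment.
    idx = 0
    vbar0_base = 0xa0000000   # hypothetical free MMIO base for vbar[0]
    vbar2_base = 0xa0200000   # hypothetical free MMIO base for vbar[2]
    print("#define SOS_IVSHMEM_DEVICE_%-19s" % (str(idx) + "_VBAR"),
          " .vbar_base[0] = {:#x}UL, \\".format(vbar0_base))
    print("{}.vbar_base[2] = {:#x}UL".format(' ' * 54, vbar2_base))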
@@ -301,6 +301,35 @@ def generate_file(config):
                 mmiolist_per_vm[vm_id].append(MmioWindow(start = bar_2, end = bar_2 + int_size - 1))
                 mmiolist_per_vm[vm_id].sort()
             print("", file=config)
+        elif scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
+            ivshmem_region = common.get_hv_item_tag(common.SCENARIO_INFO_FILE,
+                "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
+            shmem_regions = scenario_cfg_lib.get_shmem_regions(ivshmem_region)
+            if vm_id not in shmem_regions.keys():
+                continue
+            idx = 0
+            for shm in ivshmem_region:
+                if shm is None or shm.strip() == '':
+                    continue
+                shm_splited = shm.split(',')
+                name = shm_splited[0].strip()
+                size = shm_splited[1].strip()
+                try:
+                    int_size = int(size) * 0x100000
+                except:
+                    int_size = 0
+                # vbar[0] for shared memory is 0x200100
+                free_bar = get_free_mmio(matching_mmios, mmiolist_per_vm[vm_id], 0x200100)
+                mmiolist_per_vm[vm_id].append(free_bar)
+                mmiolist_per_vm[vm_id].sort()
+                print("#define SOS_IVSHMEM_DEVICE_%-19s" % (str(idx) + "_VBAR"),
+                    " .vbar_base[0] = {:#x}UL, \\".format(free_bar.start), file=config)
+                free_bar = get_free_mmio(matching_mmios, mmiolist_per_vm[vm_id], int_size + 0x200000)
+                mmiolist_per_vm[vm_id].append(free_bar)
+                mmiolist_per_vm[vm_id].sort()
+                print("{}.vbar_base[2] = {:#x}UL".format(' ' * 54, free_bar.start), file=config)
+                print("", file=config)
+                idx += 1
 
     compared_bdf = []
     for cnt_sub_name in board_cfg_lib.SUB_NAME_COUNT.keys():
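Note: get_free_mmio and MmioWindow are defined elsewhere in the config tool and are not part of this diff. A minimal first-fit sketch of the behavior the hunk above appears to rely on (only the names and call shape come from the diff; the implementation here is a guess):

    from collections import namedtuple

    MmioWindow = namedtuple('MmioWindow', ['start', 'end'])

    def get_free_mmio(windows, used, size):
        # Sketch: return the first gap of at least `size` bytes inside one of
        # the candidate windows that does not overlap an already-used range.
        for w in windows:
            base = w.start
            for u in sorted(used):
                if base + size - 1 < u.start:
                    break
                if base <= u.end:
                    base = u.end + 1
            if base + size - 1 <= w.end:
                return MmioWindow(start=base, end=base + size - 1)
        raise ValueError("no free MMIO window of size {:#x}".format(size))

With that model, the hunk first grabs a 0x200100-byte range for vbar[0], records it in mmiolist_per_vm so the second call cannot reuse it, then grabs int_size + 0x200000 bytes for vbar[2].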
@@ -95,7 +95,6 @@ def get_pci_num(pci_devs):
 
     return pci_devs_num
 
-
 def get_shmem_regions(raw_shmem_regions):
     shmem_regions = {'err': []}
     for raw_shmem_region in raw_shmem_regions:
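For context, the SOS branch in the first hunk splits each IVSHMEM_REGION entry on ',' and converts the size field from MB to bytes. A toy model of that parsing (the "name, size" field order is taken from the diff; anything beyond the second field is not modeled here):

    def parse_region(raw):
        # Mirrors the shm.split(',') / strip() pattern from the first hunk.
        fields = raw.split(',')
        name = fields[0].strip()
        size = fields[1].strip()
        try:
            size_bytes = int(size) * 0x100000   # MB -> bytes
        except ValueError:
            size_bytes = 0    # the diff falls back to 0 on unparsable sizes
        return name, size_bytes

    print(parse_region("shm_region_0, 2"))   # ('shm_region_0', 2097152), i.e. 0x200000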
@@ -163,7 +162,10 @@ def get_pci_dev_num_per_vm():
                 pci_dev_num[vm_i] -= 1
             pci_dev_num_per_vm[vm_i] = pci_dev_num[vm_i] + shmem_num_i
         elif "SOS_VM" == VM_DB[vm_type]['load_type']:
-            continue
+            shmem_num_i = 0
+            if shmem_enabled == 'y' and vm_i in shmem_num.keys():
+                shmem_num_i = shmem_num[vm_i]
+            pci_dev_num_per_vm[vm_i] = shmem_num_i
 
     return pci_dev_num_per_vm
 
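The effect of this hunk: the SOS is no longer skipped, and instead contributes one virtual PCI device per shared-memory region it joins. A worked toy example (VM IDs and counts are made up):

    shmem_enabled = 'y'
    shmem_num = {0: 2, 1: 1}   # hypothetical: VM0 joins two regions, VM1 one
    vm_i = 0                   # pretend VM0 is the SOS_VM
    shmem_num_i = 0
    if shmem_enabled == 'y' and vm_i in shmem_num.keys():
        shmem_num_i = shmem_num[vm_i]
    pci_dev_num_per_vm = {vm_i: shmem_num_i}
    print(pci_dev_num_per_vm)   # {0: 2}, two ivshmem vdevs for the SOS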
@@ -793,9 +795,8 @@ def share_mem_check(shmem_regions, raw_shmem_regions, vm_type_info, prime_item,
                 ERR_LIST[key] = "The communication VM IDs of the share memory should not be duplicated."
                 return
             for target_vm_id in int_vmid_list:
-                if curr_vm_id not in vm_type_info.keys() or target_vm_id not in vm_type_info.keys() \
-                    or vm_type_info[curr_vm_id] in ['SOS_VM'] or vm_type_info[target_vm_id] in ['SOS_VM']:
-                    ERR_LIST[key] = "Shared Memory can be only configured for existed Pre-launched VMs and Post-launched VMs."
+                if curr_vm_id not in vm_type_info.keys() or target_vm_id not in vm_type_info.keys():
+                    ERR_LIST[key] = "Shared Memory can be only configured for existed VMs."
                     return
 
         if name =='' or size == '':
@@ -199,16 +199,23 @@ def generate_file(vm_info, config):
         print("\t{", file=config)
         print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[0]), file=config)
         if vm_i in vm_info.cfg_pci.pci_devs.keys():
-            print("\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{0:02d}U, .f = 0x00U}},".format(pci_cnt), file=config)
-            print("\t\t.vdev_ops = &vpci_ivshmem_ops,", file=config)
-            bdf_tuple = BusDevFunc(0,pci_cnt,0)
-            vm_used_bdf.append(bdf_tuple)
+            if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
+                free_bdf = find_unused_bdf(sos_used_bdf, "ivshmem")
+                print("\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{:02d}U, .f = 0x{:02d}U}}," \
+                    .format(free_bdf.dev,free_bdf.func), file=config)
+                print("\t\t.vdev_ops = &vpci_ivshmem_ops,", file=config)
+                sos_used_bdf.append(free_bdf)
+            else:
+                print("\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{0:02d}U, .f = 0x00U}},".format(pci_cnt), file=config)
+                print("\t\t.vdev_ops = &vpci_ivshmem_ops,", file=config)
+                bdf_tuple = BusDevFunc(0,pci_cnt,0)
+                vm_used_bdf.append(bdf_tuple)
         elif vm_i not in vm_info.cfg_pci.pci_devs.keys():
             if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
                 print("\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{0:02d}U, .f = 0x00U}},".format(pci_cnt), file=config)
                 bdf_tuple = BusDevFunc(0,pci_cnt,0)
                 vm_used_bdf.append(bdf_tuple)
-            else:
+            elif scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "POST_LAUNCHED_VM":
                 print("\t\t.vbdf.value = UNASSIGNED_VBDF,", file=config)
         print("\t\t.vdev_ops = &vpci_ivshmem_ops,", file=config)
         for shm_name, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.shm_bar_dic.items():
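Note: find_unused_bdf is referenced above but not shown in this excerpt. A minimal sketch of what it plausibly does, reusing the BusDevFunc tuple the surrounding code already passes around (the scan order, and using the "ivshmem" name only for error text, are guesses):

    from collections import namedtuple

    BusDevFunc = namedtuple('BusDevFunc', ['bus', 'dev', 'func'])

    def find_unused_bdf(used_bdf, vdev_name):
        # Sketch: walk device numbers on bus 0 and return the first
        # bus:dev.func not already claimed in used_bdf.
        for dev in range(0x20):   # PCI allows device numbers 0..31
            candidate = BusDevFunc(0, dev, 0)
            if candidate not in used_bdf:
                return candidate
        raise ValueError("no free BDF left for {}".format(vdev_name))

    sos_used_bdf = [BusDevFunc(0, 0, 0), BusDevFunc(0, 1, 0)]
    print(find_unused_bdf(sos_used_bdf, "ivshmem"))   # BusDevFunc(bus=0, dev=2, func=0)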
@@ -219,6 +226,10 @@ def generate_file(vm_info, config):
                 print("\t\t.shm_region_name = IVSHMEM_SHM_REGION_{},".format(index), file=config)
                 print("\t\tIVSHMEM_DEVICE_{}_VBAR".format(index), file=config)
                 break
+            elif scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
+                print("\t\t.shm_region_name = IVSHMEM_SHM_REGION_{},".format(index), file=config)
+                print("\t\tSOS_IVSHMEM_DEVICE_{}_VBAR".format(index), file=config)
+                break
             else:
                 print("\t\t.shm_region_name = IVSHMEM_SHM_REGION_{}".format(index), file=config)
                 break
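Putting the last two hunks together, the SOS branch emits one pci_dev config entry per region. A sketch of the generated C fragment, replaying the print sequence with placeholder values (PCI_DEV_TYPE_HVEMUL stands in for PCI_DEV_TYPE[0], and the BDF and index are made up):

    import sys
    config = sys.stdout
    index, dev, func = 0, 2, 0   # hypothetical region index and free BDF
    print("\t{", file=config)
    print("\t\t.emu_type = {},".format("PCI_DEV_TYPE_HVEMUL"), file=config)
    print("\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{:02d}U, .f = 0x{:02d}U}},".format(dev, func), file=config)
    print("\t\t.vdev_ops = &vpci_ivshmem_ops,", file=config)
    print("\t\t.shm_region_name = IVSHMEM_SHM_REGION_{},".format(index), file=config)
    print("\t\tSOS_IVSHMEM_DEVICE_{}_VBAR".format(index), file=config)
    print("\t},", file=config)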