Mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2025-07-16 16:32:51 +00:00)
acrn-config: support configuration for Inter-VM communication
This patch adds support for Inter-VM communication via IVSHMEM to the config tool. Users can set IVSHMEM_ENABLED to enable or disable Inter-VM communication through IVSHMEM, and can configure the name, size, and communicating VM IDs of the IVSHMEM devices in the VM settings of the scenario XMLs; the config tool then generates the corresponding IVSHMEM configuration for Inter-VM communication.

The config tool performs sanity checks, including when the XMLs are saved: a shared memory region must be configured in the format [name],[size],[VM ID]:[VM ID](:[VM ID]...); the name may be at most 32 bytes and names must not be duplicated; the minimum shared memory region size is 2M and the size must be a power of 2; and the size of a shared memory region must not exceed the available RAM.

Tracked-On: #4853
Signed-off-by: Shuang Zheng <shuang.zheng@intel.com>
Acked-by: Victor Sun <victor.sun@intel.com>
This commit is contained in: parent 4665a17f72 · commit e46c5ac350
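For reference, the sanity checks described above boil down to a few simple rules on each IVSHMEM_REGION string. The snippet below is a minimal, standalone sketch of those rules, not the tool's actual implementation (that is share_mem_check() in scenario_cfg_lib.py further down in this diff); check_region and max_ram are illustrative names, and max_ram merely stands in for the board's available RAM.

import math

def check_region(region, max_ram=0x40000000):
    """Illustrative check of one IVSHMEM_REGION entry such as "shm_region_0,0x200000,0:2"."""
    parts = [p.strip() for p in region.split(',')]
    if len(parts) != 3:
        return "expected [name],[size],[VM ID]:[VM ID](:[VM ID]...)"
    name, size_str, vm_ids = parts
    if not 1 <= len(name) <= 32:
        return "the region name must be 1..32 bytes"
    try:
        # the tool accepts decimal or hexadecimal sizes
        size = int(size_str) if size_str.isdecimal() else int(size_str, 16)
    except ValueError:
        return "the size must be decimal or hexadecimal"
    if size < 0x200000 or not math.log2(size).is_integer():
        return "the size must be at least 2M and a power of 2"
    if size > max_ram:  # max_ram is an assumption standing in for available board RAM
        return "the size must not exceed the available RAM"
    ids = vm_ids.split(':')
    if len(ids) < 2 or len(set(ids)) != len(ids):
        return "at least two distinct VM IDs are required"
    return "ok"

print(check_region("shm_region_0,0x200000,0:2"))  # -> ok

For example, "shm_region_0,0x200000,0:2" would describe a 2M region named shm_region_0 shared between VM0 and VM2.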
@@ -62,5 +62,16 @@ def generate_file(config):

            i_cnt += 1

    ivshmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    raw_shmem_regions = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
    if ivshmem_enabled == 'y':
        shmem_cnt = 0
        for raw_shmem_region in raw_shmem_regions:
            if raw_shmem_region and raw_shmem_region.strip != '':
                name = raw_shmem_region.split(',')[0].strip()
                print("", file=config)
                print("#define IVSHMEM_SHM_REGION_%-21d"%shmem_cnt, end="", file=config)
                print('"{}"'.format(name), file=config)
                shmem_cnt += 1

    # write the end to the pci devices
    print("{0}".format(PCI_END_HEADER), file=config)
@@ -78,4 +78,26 @@ def generate_file(config):

            i_cnt += 1

    ivshmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    if ivshmem_enabled == 'y':
        board_cfg_lib.parse_mem()
        for shm_name, bar_attr_dic in board_cfg_lib.PCI_DEV_BAR_DESC.shm_bar_dic.items():
            index = shm_name[:shm_name.find('_')]
            i_cnt = 0
            for bar_i, bar_attr in bar_attr_dic.items():
                i_cnt += 1
                if bar_i == 0:
                    if len(bar_attr_dic.keys()) == 1:
                        print("#define IVSHMEM_DEVICE_%-23s" % (str(index) + "_VBAR"),
                              " .vbar_base[{}] = {}UL".format(bar_i, bar_attr.addr), file=config)
                    else:
                        print("#define IVSHMEM_DEVICE_%-23s" % (str(index) + "_VBAR"),
                              " .vbar_base[{}] = {}UL, \\".format(bar_i, bar_attr.addr), file=config)
                elif i_cnt == len(bar_attr_dic.keys()):
                    print("{}.vbar_base[{}] = {}UL".format(' ' * 54, bar_i, bar_attr.addr), file=config)
                else:
                    print("{}.vbar_base[{}] = {}UL, \\".format(' ' * 54, bar_i, bar_attr.addr), file=config)

            print("", file=config)

    print(VBAR_INFO_ENDIF, file=config)
@@ -33,52 +33,6 @@ VM_NUM_MAP_TOTAL_HV_RAM_SIZE = {

MEM_ALIGN = 2 * common.SIZE_M


def find_avl_memory(ram_range, hpa_size, hv_start_offset):
    """
    This is get hv address from System RAM as host physical size
    :param ram_range: System RAM mapping
    :param hpa_size: fixed host physical size
    :param hv_start_offset: base address of HV RAM start
    :return: start host physical address
    """
    ret_start_addr = 0
    tmp_order_key = 0

    tmp_order_key = sorted(ram_range)
    for start_addr in tmp_order_key:
        mem_range = ram_range[start_addr]
        if start_addr < hv_start_offset < start_addr + ram_range[start_addr]:
            # 256M address located in this start ram range
            if start_addr + mem_range - hv_start_offset > int(hpa_size, 10):
                ret_start_addr = hv_start_offset
                break
        elif start_addr > hv_start_offset:
            # above 256M address, than return the start address of this ram range
            ret_start_addr = start_addr
            break

    return hex(ret_start_addr)


def get_ram_range():
    """ Get System RAM range mapping """
    # read system ram from board_info.xml
    ram_range = {}

    io_mem_lines = board_cfg_lib.get_info(
        common.BOARD_INFO_FILE, "<IOMEM_INFO>", "</IOMEM_INFO>")

    for line in io_mem_lines:
        if 'System RAM' not in line:
            continue
        start_addr = int(line.split('-')[0], 16)
        end_addr = int(line.split('-')[1].split(':')[0], 16)
        mem_range = end_addr - start_addr
        ram_range[start_addr] = mem_range

    return ram_range


def get_serial_type():
    """ Get serial console type specified by user """
    ttys_type = ''
@@ -123,17 +77,20 @@ def get_memory(hv_info, config):
        err_dic["board config: total vm number error"] = "VM num should not be greater than 8"
        return err_dic

    ram_range = get_ram_range()

    # reseve 16M memory for hv sbuf, ramoops, etc.
    reserved_ram = 0x1000000
    # We recommend to put hv ram start address high than 0x10000000 to
    # reduce memory conflict with GRUB/SOS Kernel.
    hv_start_offset = 0x10000000
    total_size = reserved_ram + hv_ram_size
    avl_start_addr = find_avl_memory(ram_range, str(total_size), hv_start_offset)
    for start_addr in list(board_cfg_lib.USED_RAM_RANGE):
        if hv_start_offset <= start_addr < 0x80000000:
            del board_cfg_lib.USED_RAM_RANGE[start_addr]
    ram_range = board_cfg_lib.get_ram_range()
    avl_start_addr = board_cfg_lib.find_avl_memory(ram_range, str(total_size), hv_start_offset)
    hv_start_addr = int(avl_start_addr, 16) + int(hex(reserved_ram), 16)
    hv_start_addr = common.round_up(hv_start_addr, MEM_ALIGN)
    board_cfg_lib.USED_RAM_RANGE[hv_start_addr] = total_size

    if not hv_info.mem.hv_ram_start:
        print("CONFIG_HV_RAM_START={}".format(hex(hv_start_addr)), file=config)
@@ -149,6 +106,7 @@ def get_memory(hv_info, config):
    print("CONFIG_SOS_RAM_SIZE={}".format(hv_info.mem.sos_ram_size), file=config)
    print("CONFIG_UOS_RAM_SIZE={}".format(hv_info.mem.uos_ram_size), file=config)
    print("CONFIG_STACK_SIZE={}".format(hv_info.mem.stack_size), file=config)
    print("CONFIG_IVSHMEM_ENABLED={}".format(hv_info.mem.ivshmem_enable), file=config)


def get_serial_console(config):
@@ -153,6 +153,8 @@ class Memory:
        self.platform_ram_size = 0
        self.sos_ram_size = 0
        self.uos_ram_size = 0
        self.ivshmem_enable = 'n'
        self.ivshmem_region = []

    def get_info(self):
        self.stack_size = common.get_hv_item_tag(self.hv_file, "MEMORY", "STACK_SIZE")
@@ -162,6 +164,8 @@ class Memory:
        self.platform_ram_size = common.get_hv_item_tag(self.hv_file, "MEMORY", "PLATFORM_RAM_SIZE")
        self.sos_ram_size = common.get_hv_item_tag(self.hv_file, "MEMORY", "SOS_RAM_SIZE")
        self.uos_ram_size = common.get_hv_item_tag(self.hv_file, "MEMORY", "UOS_RAM_SIZE")
        self.ivshmem_enable = common.get_hv_item_tag(self.hv_file, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
        self.ivshmem_region = common.get_hv_item_tag(self.hv_file, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")

    def check_item(self):
        hv_cfg_lib.hv_size_check(self.stack_size, "MEMORY", "STACK_SIZE")
@@ -169,6 +173,7 @@ class Memory:
        hv_cfg_lib.hv_size_check(self.platform_ram_size, "MEMORY", "PLATFORM_RAM_SIZE")
        hv_cfg_lib.hv_size_check(self.sos_ram_size, "MEMORY", "SOS_RAM_SIZE")
        hv_cfg_lib.hv_size_check(self.uos_ram_size, "MEMORY", "UOS_RAM_SIZE")
        hv_cfg_lib.ny_support_check(self.ivshmem_enable, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")


class HvInfo:
@@ -21,6 +21,7 @@ LEGACY_TTYS = {

VALID_LEGACY_IRQ = []
ERR_LIST = {}
USED_RAM_RANGE = {}

HEADER_LICENSE = common.open_license() + "\n"

@@ -363,6 +364,7 @@ class Pci_Dev_Bar_Desc:
    def __init__(self):
        self.pci_dev_dic = {}
        self.pci_bar_dic = {}
        self.shm_bar_dic = {}

PCI_DEV_BAR_DESC = Pci_Dev_Bar_Desc()
SUB_NAME_COUNT = {}
@@ -472,6 +474,47 @@ def parser_pci():
    PCI_DEV_BAR_DESC.pci_bar_dic[cur_bdf] = tmp_bar_dic


def parse_mem():
    raw_shmem_regions = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")

    global USED_RAM_RANGE
    for shm_name, shm_bar_dic in PCI_DEV_BAR_DESC.shm_bar_dic.items():
        if 0 in shm_bar_dic.keys() and int(shm_bar_dic[0].addr, 16) in USED_RAM_RANGE.keys():
            del USED_RAM_RANGE[int(shm_bar_dic[0].addr, 16)]
        if 2 in shm_bar_dic.keys() and int(shm_bar_dic[2].addr, 16)-0xC in USED_RAM_RANGE.keys():
            del USED_RAM_RANGE[int(shm_bar_dic[2].addr, 16)-0xC]

    idx = 0
    for shm in raw_shmem_regions:
        if shm is None or shm.strip() == '':
            continue
        shm_splited = shm.split(',')
        name = shm_splited[0].strip()
        size = shm_splited[1].strip()

        if size.isdecimal():
            int_size = int(size)
        else:
            int_size = int(size, 16)
        ram_range = get_ram_range()
        tmp_bar_dict = {}
        hv_start_offset = 0x80000000
        ret_start_addr = find_avl_memory(ram_range, str(0x200100), hv_start_offset)
        bar_mem_0 = Bar_Mem()
        bar_mem_0.addr = hex(common.round_up(int(ret_start_addr, 16), 0x200000))
        USED_RAM_RANGE[int(bar_mem_0.addr, 16)] = 0x100
        tmp_bar_dict[0] = bar_mem_0
        ram_range = get_ram_range()
        hv_start_offset2 = 0x100000000
        ret_start_addr2 = find_avl_memory(ram_range, str(int_size + 0x200000), hv_start_offset2)
        bar_mem_2 = Bar_Mem()
        bar_mem_2.addr = hex(common.round_up(int(ret_start_addr2, 16), 0x200000) + 0xC)
        USED_RAM_RANGE[common.round_up(int(ret_start_addr2, 16), 0x20000)] = int_size
        tmp_bar_dict[2] = bar_mem_2
        PCI_DEV_BAR_DESC.shm_bar_dic[str(idx)+'_'+name] = tmp_bar_dict
        idx += 1


def is_rdt_supported():
    """
    Returns True if platform supports RDT else False
@@ -569,3 +612,71 @@ def is_tpm_passthru():
            tpm_passthru = True

    return tpm_passthru


def find_avl_memory(ram_range, hpa_size, hv_start_offset):
    """
    This is get hv address from System RAM as host physical size
    :param ram_range: System RAM mapping
    :param hpa_size: fixed host physical size
    :param hv_start_offset: base address of HV RAM start
    :return: start host physical address
    """
    ret_start_addr = 0
    tmp_order_key = 0
    int_hpa_size = int(hpa_size, 10)

    tmp_order_key = sorted(ram_range)
    for start_addr in tmp_order_key:
        mem_range = ram_range[start_addr]
        if start_addr <= hv_start_offset and hv_start_offset + int_hpa_size <= start_addr + mem_range:
            ret_start_addr = hv_start_offset
            break
        elif start_addr >= hv_start_offset and mem_range >= int_hpa_size:
            ret_start_addr = start_addr
            break

    return hex(ret_start_addr)


def get_ram_range():
    """ Get System RAM range mapping """
    # read system ram from board_info.xml
    ram_range = {}

    io_mem_lines = get_info(
        common.BOARD_INFO_FILE, "<IOMEM_INFO>", "</IOMEM_INFO>")

    for line in io_mem_lines:
        if 'System RAM' not in line:
            continue
        start_addr = int(line.split('-')[0], 16)
        end_addr = int(line.split('-')[1].split(':')[0], 16)
        mem_range = end_addr - start_addr
        ram_range[start_addr] = mem_range

    global USED_RAM_RANGE
    tmp_order_key_used = sorted(USED_RAM_RANGE)
    for start_addr_used in tmp_order_key_used:
        mem_range_used = USED_RAM_RANGE[start_addr_used]
        tmp_order_key = sorted(ram_range)
        for start_addr in tmp_order_key:
            mem_range = ram_range[start_addr]
            if start_addr < start_addr_used and start_addr_used + mem_range_used < start_addr + mem_range:
                ram_range[start_addr] = start_addr_used - start_addr
                ram_range[start_addr_used+mem_range_used] = start_addr + mem_range - start_addr_used - mem_range_used
                break
            elif start_addr == start_addr_used and start_addr_used + mem_range_used < start_addr + mem_range:
                del ram_range[start_addr]
                ram_range[start_addr_used + mem_range_used] = start_addr + mem_range - start_addr_used - mem_range_used
                break
            elif start_addr < start_addr_used and start_addr_used + mem_range_used == start_addr + mem_range:
                ram_range[start_addr] = start_addr_used - start_addr
                break
            elif start_addr == start_addr_used and start_addr_used + mem_range_used == start_addr + mem_range:
                del ram_range[start_addr]
                break
            else:
                continue

    return ram_range
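To make the allocation policy in the hunk above easier to follow, here is a minimal, standalone sketch of the first-fit selection that the new find_avl_memory() performs on the {start: length} map produced by get_ram_range(). The function name first_fit and the sample ranges are illustrative only and are not part of this patch.

def first_fit(ram_range, size, min_addr):
    # ram_range maps a region start address to its length, as get_ram_range() builds it.
    # Return min_addr if it sits inside a region with enough room behind it, otherwise
    # the start of the first region at or above min_addr that is large enough.
    for start in sorted(ram_range):
        length = ram_range[start]
        if start <= min_addr and min_addr + size <= start + length:
            return min_addr
        if start >= min_addr and length >= size:
            return start
    return 0

# Two sample System RAM regions: one below 2G, one starting at 4G.
sample_ranges = {0x100000: 0x7FF00000, 0x100000000: 0x40000000}
# Ask for 2M above the 4G line, roughly what parse_mem() does for an IVSHMEM BAR2 window.
print(hex(first_fit(sample_ranges, 0x200000, 0x100000000)))  # -> 0x100000000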
@@ -19,7 +19,7 @@ PY_CACHES = ["__pycache__", "../board_config/__pycache__", "../scenario_config/_
GUEST_FLAG = ["0UL", "GUEST_FLAG_SECURE_WORLD_ENABLED", "GUEST_FLAG_LAPIC_PASSTHROUGH",
              "GUEST_FLAG_IO_COMPLETION_POLLING", "GUEST_FLAG_HIDE_MTRR", "GUEST_FLAG_RT"]

MULTI_ITEM = ["guest_flag", "pcpu_id", "vcpu_clos", "input", "block", "network", "pci_dev"]
MULTI_ITEM = ["guest_flag", "pcpu_id", "vcpu_clos", "input", "block", "network", "pci_dev", "shmem_region"]

SIZE_K = 1024
SIZE_M = SIZE_K * 1024
@@ -45,6 +45,7 @@ class MultiItem():
        self.vir_console = []
        self.vir_network = []
        self.pci_dev = []
        self.shmem_region = []

class TmpItem():

@@ -282,6 +283,10 @@ def get_leaf_value(tmp, tag_str, leaf):
    if leaf.tag == "pci_dev" and tag_str == "pci_dev":
        tmp.multi.pci_dev.append(leaf.text)

    # get shmem_region for vm
    if leaf.tag == "shmem_region" and tag_str == "shmem_region":
        tmp.multi.shmem_region.append(leaf.text)


def get_sub_value(tmp, tag_str, vm_id):

@@ -313,6 +318,10 @@ def get_sub_value(tmp, tag_str, vm_id):
    if tmp.multi.pci_dev and tag_str == "pci_dev":
        tmp.tag[vm_id] = tmp.multi.pci_dev

    # append shmem_region for vm
    if tmp.multi.shmem_region and tag_str == "shmem_region":
        tmp.tag[vm_id] = tmp.multi.shmem_region


def get_leaf_tag_map(config_file, branch_tag, tag_str=''):
    """
@@ -409,6 +418,7 @@ def get_hv_item_tag(config_file, branch_tag, tag_str='', leaf_str=''):

    tmp = ''
    root = get_config_root(config_file)

    for item in root:
        # for each 2th level item
        for sub in item:
@@ -421,26 +431,35 @@ def get_hv_item_tag(config_file, branch_tag, tag_str='', leaf_str=''):
                continue

            # for each 3rd level item
            tmp_list = []
            for leaf in sub:
                if leaf.tag == tag_str:
                if not leaf_str:
                    if leaf.tag == tag_str and leaf.text and leaf.text != None:
                        tmp = leaf.text
                        if tag_str == "IVSHMEM_REGION":
                            tmp_list.append(leaf.text)
                        else:
                            tmp = leaf.text

                else:
                    # for each 4rd level item
                    tmp_list = []
                    for leaf_s in leaf:
                        if leaf_s.tag == leaf_str and leaf_s.text and leaf_s.text != None:
                            if leaf_str == "CLOS_MASK" or leaf_str == "MBA_DELAY":
                            if leaf_str == "CLOS_MASK" or leaf_str == "MBA_DELAY" or leaf_str == "IVSHMEM_REGION":
                                tmp_list.append(leaf_s.text)
                            else:
                                tmp = leaf_s.text
                        continue

                    if leaf_str == "CLOS_MASK" or leaf_str == "MBA_DELAY":
                    if leaf_str == "CLOS_MASK" or leaf_str == "MBA_DELAY" or leaf_str == "IVSHMEM_REGION":
                        tmp = tmp_list
                        break

            if tag_str == "IVSHMEM_REGION":
                tmp = tmp_list
            break

    return tmp

@@ -3,6 +3,7 @@
# SPDX-License-Identifier: BSD-3-Clause
#

import math
import common
import board_cfg_lib

@@ -92,6 +93,41 @@ def get_pci_num(pci_devs):
    return pci_devs_num


def get_shmem_regions(raw_shmem_regions):
    shmem_regions = {'err': []}
    for raw_shmem_region in raw_shmem_regions:
        if raw_shmem_region and raw_shmem_region.strip():
            shm_splited = raw_shmem_region.split(',')
            if len(shm_splited) == 3 and (shm_splited[0].strip() != '' and shm_splited[1].strip() != ''
                                          and len(shm_splited[2].split(':')) >= 2):
                name = shm_splited[0].strip()
                size = shm_splited[1].strip()
                vmid_list = shm_splited[2].split(':')
                for i in range(len(vmid_list)):
                    try:
                        int_vm_id = int(vmid_list[i])
                    except:
                        shmem_regions['err'].append(raw_shmem_region)
                        break
                    if int_vm_id not in shmem_regions.keys():
                        shmem_regions[int_vm_id] = [','.join([name, size, ':'.join(vmid_list[0:i]+vmid_list[i+1:])])]
                    else:
                        shmem_regions[int_vm_id].append(','.join([name, size, ':'.join(vmid_list[0:i]+vmid_list[i+1:])]))
            elif raw_shmem_region.strip() != '':
                shmem_regions['err'].append(raw_shmem_region)

    return shmem_regions


def get_shmem_num(shmem_regions):

    shmem_num = {}
    for shm_i, shm_list in shmem_regions.items():
        shmem_num[shm_i] = len(shm_list)

    return shmem_num


def check_board_private_info():

    if 'SOS_VM' not in common.VM_TYPES.values():
@@ -648,3 +684,111 @@ def vcpu_clos_check(cpus_per_vm, clos_per_vm, prime_item, item):
                key = "vm:id={},{},{}".format(vm_i, prime_item, item)
                ERR_LIST[key] = "CDP_ENABLED=y, the clos value should not be greater than {} for VM{}".format(common_clos_max - 1, vm_i)
                return


def share_mem_check(shmem_regions, raw_shmem_regions, vm_type_info, prime_item, item, sub_item):

    shmem_names = {}
    for shm_i, shm_list in shmem_regions.items():
        for shm_str in shm_list:
            index = -1
            if shm_i == 'err':
                for i in range(len(raw_shmem_regions)):
                    if raw_shmem_regions[i] == shm_str:
                        index = i
                        break
            if index == -1:
                try:
                    for i in range(len(raw_shmem_regions)):
                        if raw_shmem_regions[i].split(',')[0].strip() == shm_str.split(',')[0].strip():
                            index = i
                            break
                except:
                    index = 0
            key = "hv,{},{},{}".format(prime_item, item, sub_item, index)

            shm_str_splited = shm_str.split(',')
            if len(shm_str_splited) < 3:
                ERR_LIST[key] = "The name, size, communication VM IDs of the share memory should be separated " \
                                "by comma and not be empty."
                return
            try:
                curr_vm_id = int(shm_i)
            except:
                ERR_LIST[key] = "share memory region should be configure with format like this: VM0_VM2,0x20000,0:2"
                return
            name = shm_str_splited[0].strip()
            size = shm_str_splited[1].strip()
            vmid_list = shm_str_splited[2].split(':')
            int_vmid_list = []
            for vmid in vmid_list:
                try:
                    int_vmid = int(vmid)
                    int_vmid_list.append(int_vmid)
                except:
                    ERR_LIST[key] = "The communication VM IDs of the share memory should be decimal and separated by comma."
                    return
            if not int_vmid_list:
                ERR_LIST[key] = "The communication VM IDs of the share memory should be decimal and separated by comma."
                return
            if curr_vm_id in int_vmid_list or len(set(int_vmid_list)) != len(int_vmid_list):
                ERR_LIST[key] = "The communication VM IDs of the share memory should not be duplicated."
                return
            for target_vm_id in int_vmid_list:
                if curr_vm_id not in vm_type_info.keys() or target_vm_id not in vm_type_info.keys() \
                        or vm_type_info[curr_vm_id] in ['SOS_VM'] or vm_type_info[target_vm_id] in ['SOS_VM']:
                    ERR_LIST[key] = "Shared Memory can be only configured for existed Pre-launched VMs and Post-launched VMs."
                    return

            if name =='' or size == '':
                ERR_LIST[key] = "The name, size of the share memory should not be empty."
                return
            name_len = len(name)
            if name_len > 32 or name_len == 0:
                ERR_LIST[key] = "The size of share Memory name should be in range [1,32] bytes."
                return

            int_size = 0
            try:
                if size.isdecimal():
                    int_size = int(size)
                else:
                    int_size = int(size, 16)
            except:
                ERR_LIST[key] = "The size of share Memory region should be decimal or hexadecimal."
                return
            if int_size < 0x200000 or int_size > 0x40000000:
                ERR_LIST[key] = "The size of share Memory region should be in [2M, 1G]."
                return
            if not math.log(int_size, 2).is_integer():
                ERR_LIST[key] = "The size of share Memory region should be a power of 2."
                return

            if name in shmem_names.keys():
                shmem_names[name] += 1
            else:
                shmem_names[name] = 1
            if shmem_names[name] > len(vmid_list)+1:
                ERR_LIST[key] = "The names of share memory regions should not be duplicated: {}".format(name)
                return

    board_cfg_lib.parse_mem()
    for shm_i, shm_list in shmem_regions.items():
        for shm_str in shm_list:
            shm_str_splited = shm_str.split(',')
            name = shm_str_splited[0].strip()
            index = 0
            try:
                for i in range(len(raw_shmem_regions)):
                    if raw_shmem_regions[i].split(',')[0].strip() == shm_str.split(',')[0].strip():
                        index = i
                        break
            except:
                index = 0
            key = "hv,{},{},{}".format(prime_item, item, sub_item, index)
            if 'IVSHMEM_'+name in board_cfg_lib.PCI_DEV_BAR_DESC.shm_bar_dic.keys():
                bar_attr_dic = board_cfg_lib.PCI_DEV_BAR_DESC.shm_bar_dic['IVSHMEM_'+name]
                if (0 in bar_attr_dic.keys() and int(bar_attr_dic[0].addr, 16) < 0x80000000) \
                        or (2 in bar_attr_dic.keys() and int(bar_attr_dic[2].addr, 16) < 0x100000000):
                    ERR_LIST[key] = "Failed to get the start address of the shared memory, please check the size of it."
                    return
misc/acrn-config/scenario_config/ivshmem_cfg_h.py (new file, 96 lines)
@@ -0,0 +1,96 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#

import common
import scenario_cfg_lib
import board_cfg_lib

IVSHMEM_HEADER_DEFINE = scenario_cfg_lib.HEADER_LICENSE + r"""
#ifndef IVSHMEM_CFG_H
#define IVSHMEM_CFG_H
"""
IVSHMEM_END_DEFINE = r"""#endif /* IVSHMEM_CFG_H */"""


def gen_common_header(config):
    """
    This is common header for ivshmem_cfg.h
    :param config: it is the pointer which file write to
    :return: None
    """
    print("{0}".format(IVSHMEM_HEADER_DEFINE), file=config)


def write_shmem_regions(config):
    raw_shmem_regions = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
    shmem_regions = []
    shmem_dev_num = 0
    for raw_shm in raw_shmem_regions:
        if raw_shm is None or raw_shm.strip() == '':
            continue
        raw_shm_splited = raw_shm.split(',')
        if len(raw_shm_splited) == 3 and raw_shm_splited[0].strip() != '' \
                and raw_shm_splited[1].strip() != '' and len(raw_shm_splited[2].strip().split(':')) >= 1:
            shmem_regions.append((raw_shm_splited[0].strip(), raw_shm_splited[1].strip(), raw_shm_splited[2].strip().split(':')))
            shmem_dev_num += len(raw_shm_splited[2].strip().split(':'))

    print("", file=config)
    print("/*", file=config)
    print(" * The IVSHMEM_SHM_SIZE is the sum of all memory regions.", file=config)
    print(" * The size range of each memory region is [2M, 1G) and is a power of 2.", file=config)
    print(" */", file=config)
    total_shm_size = 0
    if len(shmem_regions) > 0:
        for shmem_region in shmem_regions:
            int_size = 0
            size = shmem_region[1]
            try:
                if size.isdecimal():
                    int_size = int(size)
                else:
                    int_size = int(size, 16)
            except Exception as e:
                print('the format of shm size error: ', str(e))
            total_shm_size += int_size

    print("#define IVSHMEM_SHM_SIZE\t{}UL".format(hex(total_shm_size)), file=config)
    print("#define IVSHMEM_DEV_NUM\t\t{}UL".format(shmem_dev_num), file=config)
    print("", file=config)
    print("/* All user defined memory regions */", file=config)
    print("\nstruct ivshmem_shm_region mem_regions[] = {", file=config)
    shmem_cnt = 0
    for shmem in shmem_regions:
        print("\t{", file=config)
        print('\t\t.name = IVSHMEM_SHM_REGION_{},'.format(shmem_cnt), file=config)
        try:
            if shmem[1].isdecimal():
                int_m_size = int(int(shmem[1])/0x100000)
            else:
                int_m_size = int(int(shmem[1], 16)/0x100000)
        except:
            int_m_size = 0
        print('\t\t.size = {}UL,\t\t/* {}M */'.format(shmem[1], int_m_size), file=config)
        print("\t},", file=config)
        shmem_cnt += 1
    print("};", file=config)
    print("", file=config)


def generate_file(scenario_items, config):
    """
    Start to generate ivshmem_cfg.h
    :param scenario_items: it is the class which contain all user setting information
    :param config: it is a file pointer of scenario information for writing to
    """
    vm_info = scenario_items['vm']
    gen_common_header(config)

    if vm_info.shmem.shmem_enabled == 'y':
        print("#include <ivshmem.h>", file=config)
        print("#include <pgtable.h>", file=config)
        print("#include <pci_devices.h>", file=config)
        write_shmem_regions(config)

    print("{0}".format(IVSHMEM_END_DEFINE), file=config)
@@ -27,6 +27,7 @@ def generate_file(vm_info, config):
    :return: None
    """
    board_cfg_lib.parser_pci()
    board_cfg_lib.parse_mem()

    compared_bdf = []

@@ -51,14 +52,24 @@ def generate_file(vm_info, config):
    print("#include <vbar_base.h>", file=config)
    print("#include <mmu.h>", file=config)
    print("#include <page.h>", file=config)
    if vm_info.shmem.shmem_enabled == 'y':
        print("#include <ivshmem.h>", file=config)
    for vm_i, pci_bdf_devs_list in vm_info.cfg_pci.pci_devs.items():
        if not pci_bdf_devs_list:
            continue
        pci_cnt = 1
        if idx == 0:
            print("", file=config)
            print("/*", file=config)
            print(" * TODO: remove PTDEV macro and add DEV_PRIVINFO macro to initialize pbdf for", file=config)
            print(" * passthrough device configuration and shm_name for ivshmem device configuration.", file=config)
            print(" */", file=config)
            print("#define PTDEV(PCI_DEV)\t\tPCI_DEV, PCI_DEV##_VBAR",file=config)
            print("", file=config)
            print("/*", file=config)
            print(" * TODO: add DEV_PCICOMMON macro to initialize emu_type, vbdf and vdev_ops", file=config)
            print(" * to simplify the code.", file=config)
            print(" */", file=config)
        print("struct acrn_vm_pci_dev_config " +
              "vm{}_pci_devs[VM{}_CONFIG_PCI_DEV_NUM] = {{".format(vm_i, vm_i), file=config)
        print("\t{", file=config)
@@ -76,7 +87,7 @@ def generate_file(vm_info, config):
        fun = int(pci_bdf_dev.split('.')[1], 16)
        print("\t{", file=config)
        print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[1]), file=config)
        print("\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x0{}U, .f = 0x00U}},".format(pci_cnt), file=config)
        print("\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{0:02d}U, .f = 0x00U}},".format(pci_cnt), file=config)
        for bdf, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic.items():
            if bdf == pci_bdf_dev:
                print("\t\tPTDEV({}),".format(board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic[bdf].name_w_i_cnt), file=config)
@@ -85,4 +96,57 @@ def generate_file(vm_info, config):
            print("\t},", file=config)
            pci_cnt += 1

        if vm_info.shmem.shmem_enabled == 'y' and vm_i in vm_info.shmem.shmem_regions.keys() \
                and len(vm_info.shmem.shmem_regions[vm_i]) > 0:
            raw_shm_list = vm_info.shmem.shmem_regions[vm_i]
            for shm in raw_shm_list:
                shm_splited = shm.split(',')
                print("\t{", file=config)
                print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[0]), file=config)
                print("\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{0:02d}U, .f = 0x00U}},".format(pci_cnt), file=config)
                print("\t\t.vdev_ops = &vpci_ivshmem_ops,", file=config)
                for shm_name, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.shm_bar_dic.items():
                    index = shm_name[:shm_name.find('_')]
                    shm_name = shm_name[shm_name.find('_') + 1:]
                    if shm_name == shm_splited[0].strip():
                        print("\t\t.shm_region_name = IVSHMEM_SHM_REGION_{},".format(index), file=config)
                        print("\t\tIVSHMEM_DEVICE_{}_VBAR".format(index), file=config)
                        # print("\t\t.vbar_size[0] = 0x100,", file=config)
                        # print("\t\t.vbar_size[2] = {},".format(shm_splited[1].strip()), file=config)
                        # print('\t\t.shm_name = "{}",'.format(shm_splited[0].strip()), file=config)
                print("\t},", file=config)
                pci_cnt += 1

        print("};", file=config)

    if vm_info.shmem.shmem_enabled == 'y':
        for shm_i, raw_shm_list in vm_info.shmem.shmem_regions.items():
            shm_cnt = 0
            if shm_i not in vm_info.cfg_pci.pci_devs.keys() and len(raw_shm_list) > 0:
                print("", file=config)
                print("struct acrn_vm_pci_dev_config " +
                      "vm{}_pci_devs[VM{}_CONFIG_PCI_DEV_NUM] = {{".format(shm_i, shm_i), file=config)
                for shm in raw_shm_list:
                    shm_splited = shm.split(',')
                    print("\t{", file=config)
                    print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[0]), file=config)
                    if shm_i in common.VM_TYPES.keys() and common.VM_TYPES[shm_i] in ['PRE_RT_VM', 'PRE_STD_VM', 'SAFETY_VM']:
                        print("\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{0:02d}U, .f = 0x00U}},".format(shm_cnt), file=config)
                    else:
                        print("\t\t.vbdf.value = UNASSIGNED_VBDF,".format(shm_cnt), file=config)
                    print("\t\t.vdev_ops = &vpci_ivshmem_ops,", file=config)
                    for shm_name, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.shm_bar_dic.items():
                        index = shm_name[:shm_name.find('_')]
                        shm_name = shm_name[shm_name.find('_')+1:]
                        if shm_name == shm_splited[0].strip():
                            if shm_i in common.VM_TYPES.keys() and common.VM_TYPES[shm_i] in ['PRE_RT_VM', 'PRE_STD_VM',
                                                                                              'SAFETY_VM']:
                                print("\t\t.shm_region_name = IVSHMEM_SHM_REGION_{},".format(index), file=config)
                                print("\t\tIVSHMEM_DEVICE_{}_VBAR".format(index), file=config)
                                break
                            else:
                                print("\t\t.shm_region_name = IVSHMEM_SHM_REGION_{}".format(index), file=config)
                                break
                    shm_cnt += 1
                    print("\t},", file=config)
                print("};", file=config)
@@ -14,6 +14,7 @@ import scenario_cfg_lib
import vm_configurations_c
import vm_configurations_h
import pci_dev_c
import ivshmem_cfg_h
import common
import hv_cfg_lib
import board_defconfig
@@ -21,7 +22,7 @@ from hv_item import HvInfo

ACRN_PATH = common.SOURCE_ROOT_DIR
ACRN_CONFIG_DEF = ACRN_PATH + 'misc/vm_configs/scenarios/'
GEN_FILE = ["vm_configurations.h", "vm_configurations.c", "pci_dev.c", ".config"]
GEN_FILE = ["vm_configurations.h", "vm_configurations.c", "pci_dev.c", ".config", "ivshmem_cfg.h"]


def get_scenario_item_values(board_info, scenario_info):
@@ -73,6 +74,7 @@ def get_scenario_item_values(board_info, scenario_info):
    scenario_item_values["hv,FEATURES,L1D_VMENTRY_ENABLED"] = hv_cfg_lib.N_Y
    scenario_item_values["hv,FEATURES,MCE_ON_PSC_DISABLED"] = hv_cfg_lib.N_Y
    scenario_item_values["hv,FEATURES,IOMMU_ENFORCE_SNP"] = hv_cfg_lib.N_Y
    scenario_item_values["hv,FEATURES,IVSHMEM,IVSHMEM_ENABLED"] = hv_cfg_lib.N_Y

    scenario_cfg_lib.ERR_LIST.update(hv_cfg_lib.ERR_LIST)
    return scenario_item_values
@@ -90,15 +92,16 @@ def validate_scenario_setting(board_info, scenario_info):
    common.BOARD_INFO_FILE = board_info
    common.SCENARIO_INFO_FILE = scenario_info

    scenario_info_items = {}
    vm_info = VmInfo(board_info, scenario_info)
    vm_info.get_info()
    vm_info.check_item()

    hv_info = HvInfo(scenario_info)
    hv_info.get_info()
    hv_info.check_item()

    scenario_info_items = {}
    vm_info = VmInfo(board_info, scenario_info)
    vm_info.get_info()
    vm_info.set_ivshmem(hv_info.mem.ivshmem_region)
    vm_info.check_item()

    scenario_info_items['vm'] = vm_info
    scenario_info_items['hv'] = hv_info

@@ -151,7 +154,7 @@ def main(args):
        else:
            scen_output = ACRN_PATH + params['--out'] + "/scenarios/" + scenario + "/"
    else:
        scen_output = ACRN_CONFIG_DEF + "/scenarios/" + scenario + "/"
        scen_output = ACRN_CONFIG_DEF + "/" + scenario + "/"

    scen_board = scen_output + board_name + "/"
    common.mkdir(scen_board)
@@ -161,6 +164,7 @@ def main(args):
    vm_config_c = scen_output + GEN_FILE[1]
    pci_config_c = scen_board + GEN_FILE[2]
    config_hv = scen_board + board_name + GEN_FILE[3]
    ivshmem_config_h = scen_board + GEN_FILE[4]

    # parse the scenario.xml
    get_scenario_item_values(params['--board'], params['--scenario'])
@@ -185,6 +189,11 @@ def main(args):
    if err_dic:
        return err_dic

    # generate ivshmem_cfg.h
    print(ivshmem_config_h)
    with open(ivshmem_config_h, 'w') as config:
        ivshmem_cfg_h.generate_file(scenario_items, config)

    # generate pci_dev.c
    with open(pci_config_c, 'w') as config:
        pci_dev_c.generate_file(scenario_items['vm'], config)

@@ -228,6 +228,38 @@ class EpcSection:
        self.size = common.get_leaf_tag_map(self.scenario_info, "epc_section", "size")


class ShareMem:
    """ This is the class to get Share Memory regions for VMs """
    shmem_enabled = 'n'
    raw_shmem_regions = []
    shmem_regions = {}
    shmem_num = {}

    def __init__(self, scenario_info):
        self.scenario_info = scenario_info

    def set_ivshmem(self, ivshmem_regions):
        """
        set ivshmem regions for VMs.
        :param ivshmem_regions:
        :return:
        """
        self.raw_shmem_regions = ivshmem_regions
        self.shmem_enabled = common.get_hv_item_tag(self.scenario_info, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
        self.shmem_regions = scenario_cfg_lib.get_shmem_regions(ivshmem_regions)
        self.shmem_num = scenario_cfg_lib.get_shmem_num(self.shmem_regions)

    def check_items(self):
        '''
        check the configurations for share memories.
        :return:
        '''
        if self.shmem_enabled == 'y':
            vm_type_info = common.get_leaf_tag_map(self.scenario_info, "vm_type")
            scenario_cfg_lib.share_mem_check(self.shmem_regions, self.raw_shmem_regions, vm_type_info,
                                             "FEATURES", "IVSHMEM", "IVSHMEM_REGION")


class LoadOrderNum:
    """ This is Abstract of VM number for different load order """
    def __init__(self):
@@ -259,6 +291,7 @@ class VmInfo:
        self.vuart = VuartInfo(self.scenario_info)
        self.cfg_pci = CfgPci(self.scenario_info)
        self.load_order_cnt = LoadOrderNum()
        self.shmem = ShareMem(self.scenario_info)

    def get_info(self):
        """
@@ -281,6 +314,14 @@ class VmInfo:
        self.cfg_pci.get_info()
        self.load_order_cnt.get_info(self.load_vm)

    def set_ivshmem(self, ivshmem_regions):
        """
        set ivshmem regions for VMs
        :param ivshmem_regions:
        :return:
        """
        self.shmem.set_ivshmem(ivshmem_regions)

    def get_cpu_bitmap(self, index):
        """
        :param index: index list in GUESF_FLAGS
@@ -310,4 +351,5 @@ class VmInfo:
        self.os_cfg.check_item()
        self.cfg_pci.check_item()
        self.vuart.check_item()
        self.shmem.check_items()
        scenario_cfg_lib.ERR_LIST.update(err_dic)
@@ -340,7 +340,9 @@ def gen_pre_launch_vm(vm_type, vm_i, scenario_items, config):
        if err_dic:
            return err_dic

        if vm_info.cfg_pci.pci_devs[vm_i] and vm_info.cfg_pci.pci_devs[vm_i] != None:
        if (vm_i in vm_info.cfg_pci.pci_devs.keys() and vm_info.cfg_pci.pci_devs[vm_i]) or \
                (vm_info.shmem.shmem_enabled == 'y' and vm_i in vm_info.shmem.shmem_regions.keys() \
                and vm_info.shmem.shmem_regions[vm_i]):
            print("\t\t.pci_dev_num = VM{}_CONFIG_PCI_DEV_NUM,".format(vm_i), file=config)
            print("\t\t.pci_devs = vm{}_pci_devs,".format(vm_i), file=config)

@@ -363,6 +365,11 @@ def gen_post_launch_vm(vm_type, vm_i, scenario_items, config):
    print("\t{{\t/* VM{} */".format(vm_i), file=config)
    print("\t\t{},".format(post_vm_type), file=config)
    clos_output(scenario_items, vm_i, config)
    if vm_info.shmem.shmem_enabled == 'y' and vm_i in vm_info.shmem.shmem_regions.keys() \
            and vm_info.shmem.shmem_regions[vm_i]:
        print("\t\t/* The PCI device configuration is only for in-hypervisor vPCI devices. */", file=config)
        print("\t\t.pci_dev_num = VM{}_CONFIG_PCI_DEV_NUM,".format(vm_i), file=config)
        print("\t\t.pci_devs = vm{}_pci_devs,".format(vm_i), file=config)
    cpu_affinity_output(vm_info, vm_i, config)
    is_need_epc(vm_info.epc_section, vm_i, config)
    # VUART
@@ -376,10 +383,13 @@ def gen_post_launch_vm(vm_type, vm_i, scenario_items, config):
def pre_launch_definiation(vm_info, config):

    for vm_i,vm_type in common.VM_TYPES.items():
        if "PRE_LAUNCHED_VM" != scenario_cfg_lib.VM_DB[vm_type]['load_type']:
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] not in ["PRE_LAUNCHED_VM", "POST_LAUNCHED_VM"]:
            continue
        print("extern struct acrn_vm_pci_dev_config " +
              "vm{}_pci_devs[VM{}_CONFIG_PCI_DEV_NUM];".format(vm_i, vm_i), file=config)
        if (vm_i in vm_info.cfg_pci.pci_devs.keys() and vm_info.cfg_pci.pci_devs[vm_i]) \
                or (vm_info.shmem.shmem_enabled == 'y' and vm_i in vm_info.shmem.shmem_regions.keys() \
                and vm_info.shmem.shmem_regions[vm_i]):
            print("extern struct acrn_vm_pci_dev_config " +
                  "vm{}_pci_devs[VM{}_CONFIG_PCI_DEV_NUM];".format(vm_i, vm_i), file=config)
    print("", file=config)

def generate_file(scenario_items, config):

@@ -91,7 +91,12 @@ def gen_pre_launch_vm(scenario_items, config):
            print("#define VM{0}_CONFIG_MEM_SIZE_HPA2 {1}UL".format(
                vm_i, vm_info.mem_info.mem_size_hpa2[vm_i]), file=config)

        print("#define VM{}_CONFIG_PCI_DEV_NUM {}U".format(vm_i, vm_info.cfg_pci.pci_dev_num[vm_i]), file=config)
        shmem_num_i = 0
        if vm_info.shmem.shmem_enabled == 'y' and vm_i in vm_info.shmem.shmem_num.keys():
            shmem_num_i = vm_info.shmem.shmem_num[vm_i]

        print("#define VM{}_CONFIG_PCI_DEV_NUM {}U".format(vm_i,
              vm_info.cfg_pci.pci_dev_num[vm_i] + shmem_num_i), file=config)
        print("", file=config)
        vm_i += 1

@@ -108,6 +113,10 @@ def gen_post_launch_header(scenario_items, config):
            is_post_vm_available = True
            cpu_affinity_output(vm_info, vm_i, config)
            clos_config_output(scenario_items, vm_i, config)

            if vm_info.shmem.shmem_enabled == 'y' and vm_i in vm_info.shmem.shmem_num.keys():
                print("#define VM{}_CONFIG_PCI_DEV_NUM {}U".format(vm_i,
                      vm_info.shmem.shmem_num[vm_i]), file=config)
            vm_i += 1

        if is_post_vm_available: