diff --git a/misc/config_tools/acpi_gen/asl_gen.py b/misc/config_tools/acpi_gen/asl_gen.py
index 2fb847fbc..2566e671d 100644
--- a/misc/config_tools/acpi_gen/asl_gen.py
+++ b/misc/config_tools/acpi_gen/asl_gen.py
@@ -800,8 +800,8 @@ def main(args):
     dict_pcpu_list = collections.OrderedDict()
     for vm in scenario_root.findall('vm'):
         vm_id = vm.attrib['id']
-        vm_type_node = vm.find('vm_type')
-        if (vm_type_node is not None) and (vm_type_node.text in ['PRE_STD_VM', 'SAFETY_VM', 'PRE_RT_VM']):
+        load_order_node = vm.find('load_order')
+        if (load_order_node is not None) and (load_order_node.text == 'PRE_LAUNCHED_VM'):
             dict_passthru_devices[vm_id] = []
             for pci_dev_node in vm.findall('pci_devs/pci_dev'):
                 if pci_dev_node is not None and pci_dev_node.text is not None and pci_dev_node.text.strip():
@@ -824,7 +824,8 @@ def main(args):
         for vm in scenario_root.findall('vm'):
             vm_id = vm.attrib['id']
             vm_type_node = vm.find('vm_type')
-            if (vm_type_node is not None) and (vm_type_node.text in ['PRE_RT_VM']):
+            load_order_node = vm.find('load_order')
+            if (load_order_node is not None) and (load_order_node.text == 'PRE_LAUNCHED_VM') and (vm_type_node is not None) and (vm_type_node.text == 'RTVM'):
                 PRELAUNCHED_RTVM_ID = vm_id
                 break
     except:
diff --git a/misc/config_tools/acpi_gen/bin_gen.py b/misc/config_tools/acpi_gen/bin_gen.py
index 51f6a3d6e..8750eadd8 100644
--- a/misc/config_tools/acpi_gen/bin_gen.py
+++ b/misc/config_tools/acpi_gen/bin_gen.py
@@ -82,7 +82,7 @@ def asl_to_aml(dest_vm_acpi_path, dest_vm_acpi_bin_path, scenario_etree, allocat
             rtct = acpiparser.rtct.RTCT(os.path.join(dest_vm_acpi_path, acpi_table[0]))
             outfile = os.path.join(dest_vm_acpi_bin_path, acpi_table[1])
             # move the guest ssram area to the area next to ACPI region
-            pre_rt_vms = common.get_node("//vm[vm_type ='PRE_RT_VM']", scenario_etree)
+            pre_rt_vms = common.get_node("//vm[load_order ='PRE_LAUNCHED_VM' and vm_type ='RTVM']", scenario_etree)
             vm_id = pre_rt_vms.get("id")
             allocation_vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", allocation_etree)
             ssram_start_gpa = common.get_node("./ssram/start_gpa/text()", allocation_vm_node)
diff --git a/misc/config_tools/board_config/board_cfg_gen.py b/misc/config_tools/board_config/board_cfg_gen.py
index 73ffd7f80..d7624b857 100755
--- a/misc/config_tools/board_config/board_cfg_gen.py
+++ b/misc/config_tools/board_config/board_cfg_gen.py
@@ -13,7 +13,6 @@ import board_info_h
 import pci_devices_h
 import acpi_platform_h
 import common
-import vbar_base_h

 ACRN_PATH = common.SOURCE_ROOT_DIR
 ACRN_CONFIG_DEF = ACRN_PATH + "misc/config_tools/data/"
@@ -42,7 +41,7 @@ def main(args):
     common.BOARD_INFO_FILE = params['--board']
     common.SCENARIO_INFO_FILE = params['--scenario']
     common.get_vm_num(params['--scenario'])
-    common.get_vm_types()
+    common.get_load_order()

     if common.VM_COUNT > common.MAX_VM_NUM:
         err_dic['vm count'] = "The number of VMs in the scenario XML file should be no greater than " \
@@ -84,7 +83,6 @@
     config_board = board_fix_dir + GEN_FILE[1]
     config_acpi = board_fix_dir + GEN_FILE[2]
     config_board_h = board_fix_dir + GEN_FILE[4]
-    config_vbar_base = scen_board_dir + GEN_FILE[5]

     # generate pci_devices.h
     with open(config_pci, 'w+') as config:
@@ -102,10 +100,6 @@
         if err_dic:
             return err_dic

-    # generate vbar_base.h
-    with open(config_vbar_base, 'w+') as config:
-        vbar_base_h.generate_file(config)
-
     # generate platform_acpi_info.h
     with open(config_acpi, 'w+') as config:
         acpi_platform_h.generate_file(config, ACRN_DEFAULT_ACPI)
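For context, a minimal runnable sketch of the predicate the reworked asl_gen.py hunks
implement; the scenario fragment below is illustrative only, with element names following
the new schema (a load_order element paired with a vm_type element):

    import xml.etree.ElementTree as ET

    # Hypothetical scenario fragment; real scenario XMLs carry many more elements.
    sample = """
    <acrn-config>
      <vm id="0"><load_order>PRE_LAUNCHED_VM</load_order><vm_type>RTVM</vm_type></vm>
      <vm id="1"><load_order>SERVICE_VM</load_order><vm_type>STANDARD_VM</vm_type></vm>
    </acrn-config>
    """
    scenario_root = ET.fromstring(sample)
    for vm in scenario_root.findall('vm'):
        load_order_node = vm.find('load_order')
        vm_type_node = vm.find('vm_type')
        # Pre-launched RTVM: both the load order and the VM type must match.
        if (load_order_node is not None) and (load_order_node.text == 'PRE_LAUNCHED_VM') \
                and (vm_type_node is not None) and (vm_type_node.text == 'RTVM'):
            print("pre-launched RTVM:", vm.attrib['id'])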
diff --git a/misc/config_tools/board_config/board_info_h.py b/misc/config_tools/board_config/board_info_h.py
index 49c612d77..285811a9f 100644
--- a/misc/config_tools/board_config/board_info_h.py
+++ b/misc/config_tools/board_config/board_info_h.py
@@ -92,8 +92,7 @@ def generate_file(config):
     find_hi_mmio_window(config)

     p2sb = common.get_leaf_tag_map_bool(common.SCENARIO_INFO_FILE, "mmio_resources", "p2sb")
-    if (common.VM_TYPES.get(0) is not None and
-            scenario_cfg_lib.VM_DB[common.VM_TYPES[0]]['load_type'] == "PRE_LAUNCHED_VM"
+    if (common.LOAD_ORDER.get(0) == "PRE_LAUNCHED_VM"
             and board_cfg_lib.is_p2sb_passthru_possible()
             and p2sb.get(0, False)):
         print("", file=config)
diff --git a/misc/config_tools/launch_config/launch_cfg_gen.py b/misc/config_tools/launch_config/launch_cfg_gen.py
index 36bcc4f72..95dde64c8 100755
--- a/misc/config_tools/launch_config/launch_cfg_gen.py
+++ b/misc/config_tools/launch_config/launch_cfg_gen.py
@@ -314,8 +314,8 @@ def main(board_xml, scenario_xml, launch_xml, user_vm_id, out_dir):
     scenario_etree = etree.parse(scenario_xml)
     launch_etree = etree.parse(launch_xml)

-    service_vm_id = eval_xpath(scenario_etree, "//vm[vm_type='SERVICE_VM']/@id")
-    post_vms = scenario_etree.xpath("//vm[starts-with(vm_type, 'POST_')]")
+    service_vm_id = eval_xpath(scenario_etree, "//vm[load_order='SERVICE_VM']/@id")
+    post_vms = scenario_etree.xpath("//vm[starts-with(load_order, 'POST_')]")
     if service_vm_id is None and len(post_vms) > 0:
         logging.error("The scenario does not define a service VM so no launch scripts will be generated for the post-launched VMs in the scenario.")
         return 1
diff --git a/misc/config_tools/library/common.py b/misc/config_tools/library/common.py
index e5915faea..b3d91b929 100644
--- a/misc/config_tools/library/common.py
+++ b/misc/config_tools/library/common.py
@@ -38,7 +38,8 @@ VM_COUNT = 0
 BOARD_INFO_FILE = ""
 SCENARIO_INFO_FILE = ""
 LAUNCH_INFO_FILE = ""
-VM_TYPES = {}
+LOAD_ORDER = {}
+RTVM = {}

 MAX_VM_NUM = 16
 MAX_VUART_NUM = 8
@@ -595,9 +596,13 @@ def num2int(str_value):
     return val

-def get_vm_types():
-    global VM_TYPES
-    VM_TYPES = get_leaf_tag_map(SCENARIO_INFO_FILE, "vm_type")
+def get_load_order():
+    global LOAD_ORDER
+    LOAD_ORDER = get_leaf_tag_map(SCENARIO_INFO_FILE, "load_order")
+
+def get_RTVM():
+    global RTVM
+    RTVM = get_leaf_tag_map(SCENARIO_INFO_FILE, "vm_type")

 def get_avl_dev_info(bdf_desc_map, pci_sub_class):
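The two new helpers populate module-level dicts keyed by VM id. A sketch of the shapes
callers can expect (the values here are made up for illustration):

    # After common.get_load_order() / common.get_RTVM() run against a scenario XML:
    LOAD_ORDER = {0: 'PRE_LAUNCHED_VM', 1: 'SERVICE_VM', 2: 'POST_LAUNCHED_VM'}
    RTVM = {0: 'RTVM', 1: 'STANDARD_VM', 2: 'STANDARD_VM'}

    # Filtering then reduces to a plain value comparison, as in the callers below.
    post_vm_ids = [i for i, lo in LOAD_ORDER.items() if lo == 'POST_LAUNCHED_VM']
    print(post_vm_ids)  # [2]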
"IVSHMEM_ENABLED") except: return sos_vm_id = 0 - for vm_id, vm_type in vm_types.items(): - if vm_type in ['SERVICE_VM']: + for vm_id,load_order in load_orders.items(): + if load_order in ['SERVICE_VM']: sos_vm_id = vm_id - elif vm_type in ['POST_STD_VM', 'POST_RT_VM']: + elif load_order in ['POST_LAUNCHED_VM']: user_vmid = vm_id - sos_vm_id shm_region_key = 'user_vm:id={},shm_regions,shm_region'.format(user_vmid) launch_item_values[shm_region_key] = [''] @@ -623,13 +623,13 @@ def set_shm_regions(launch_item_values, scenario_info): def set_pci_vuarts(launch_item_values, scenario_info): try: launch_item_values['user_vm,console_vuart'] = DM_VUART0 - vm_types = common.get_leaf_tag_map(scenario_info, 'vm_type') + load_orders = common.get_leaf_tag_map(scenario_info, 'load_order') sos_vm_id = 0 - for vm_id, vm_type in vm_types.items(): - if vm_type in ['SERVICE_VM']: + for vm_id,load_order in load_orders.items(): + if load_order in ['SERVICE_VM']: sos_vm_id = vm_id for vm in list(common.get_config_root(scenario_info)): - if vm.tag == 'vm' and scenario_cfg_lib.VM_DB[vm_types[int(vm.attrib['id'])]]['load_type'] == 'POST_LAUNCHED_VM': + if vm.tag == 'vm' and load_orders[int(vm.attrib['id'])] == 'POST_LAUNCHED_VM': user_vmid = int(vm.attrib['id']) - sos_vm_id pci_vuart_key = 'user_vm:id={},communication_vuarts,communication_vuart'.format(user_vmid) for elem in list(vm): diff --git a/misc/config_tools/library/scenario_cfg_lib.py b/misc/config_tools/library/scenario_cfg_lib.py index 7e68a5d94..6701ca380 100644 --- a/misc/config_tools/library/scenario_cfg_lib.py +++ b/misc/config_tools/library/scenario_cfg_lib.py @@ -139,9 +139,9 @@ def get_pci_vuart_num(vuarts): vuarts_num[vm_i] += 1 for vm_i in vuart0_setting: - vm_type = common.VM_TYPES[vm_i] + load_order = common.LOAD_ORDER[vm_i] # Skip post-launched vm's pci base vuart0 - if "POST_LAUNCHED_VM" == VM_DB[vm_type]['load_type'] and 0 in vuarts[vm_i].keys() \ + if "POST_LAUNCHED_VM" == load_order and 0 in vuarts[vm_i].keys() \ and vuarts[vm_i][0]['base'] != "INVALID_PCI_BASE": vuarts_num[vm_i] -= 1 continue @@ -176,19 +176,19 @@ def get_pci_dev_num_per_vm(): vuarts = common.get_vuart_info(common.SCENARIO_INFO_FILE) vuarts_num = get_pci_vuart_num(vuarts) - for vm_i,vm_type in common.VM_TYPES.items(): - if "POST_LAUNCHED_VM" == VM_DB[vm_type]['load_type']: + for vm_i,load_order in common.LOAD_ORDER.items(): + if "POST_LAUNCHED_VM" == load_order: shmem_num_i = 0 vuart_num = vuarts_num[vm_i] if shmem_enabled == 'y' and vm_i in shmem_num.keys(): shmem_num_i = shmem_num[vm_i] pci_dev_num_per_vm[vm_i] = shmem_num_i + vuart_num - elif "PRE_LAUNCHED_VM" == VM_DB[vm_type]['load_type']: + elif "PRE_LAUNCHED_VM" == load_order: shmem_num_i = 0 if shmem_enabled == 'y' and vm_i in shmem_num.keys(): shmem_num_i = shmem_num[vm_i] pci_dev_num_per_vm[vm_i] = pt_pci_num[vm_i] + shmem_num_i + vuarts_num[vm_i] - elif "SERVICE_VM" == VM_DB[vm_type]['load_type']: + elif "SERVICE_VM" == load_order: shmem_num_i = 0 if shmem_enabled == 'y' and vm_i in shmem_num.keys(): shmem_num_i = shmem_num[vm_i] @@ -328,16 +328,16 @@ def vm_cpu_affinity_check(scenario_file, launch_file, cpu_affinity): sos_vm_cpus = [] pre_launch_cpus = [] post_launch_cpus = [] - for vm_i, vm_type in common.VM_TYPES.items(): + for vm_i, load_order in common.LOAD_ORDER.items(): if vm_i not in cpu_affinity.keys(): continue - elif VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM": + elif VM_DB[load_order]['load_type'] == "PRE_LAUNCHED_VM": cpus = [x for x in cpu_affinity[vm_i] if not None] 
diff --git a/misc/config_tools/library/scenario_cfg_lib.py b/misc/config_tools/library/scenario_cfg_lib.py
index 7e68a5d94..6701ca380 100644
--- a/misc/config_tools/library/scenario_cfg_lib.py
+++ b/misc/config_tools/library/scenario_cfg_lib.py
@@ -139,9 +139,9 @@ def get_pci_vuart_num(vuarts):
             vuarts_num[vm_i] += 1

     for vm_i in vuart0_setting:
-        vm_type = common.VM_TYPES[vm_i]
+        load_order = common.LOAD_ORDER[vm_i]
         # Skip post-launched vm's pci base vuart0
-        if "POST_LAUNCHED_VM" == VM_DB[vm_type]['load_type'] and 0 in vuarts[vm_i].keys() \
+        if "POST_LAUNCHED_VM" == load_order and 0 in vuarts[vm_i].keys() \
             and vuarts[vm_i][0]['base'] != "INVALID_PCI_BASE":
             vuarts_num[vm_i] -= 1
             continue
@@ -176,19 +176,19 @@ def get_pci_dev_num_per_vm():
     vuarts = common.get_vuart_info(common.SCENARIO_INFO_FILE)
     vuarts_num = get_pci_vuart_num(vuarts)

-    for vm_i,vm_type in common.VM_TYPES.items():
-        if "POST_LAUNCHED_VM" == VM_DB[vm_type]['load_type']:
+    for vm_i,load_order in common.LOAD_ORDER.items():
+        if "POST_LAUNCHED_VM" == load_order:
             shmem_num_i = 0
             vuart_num = vuarts_num[vm_i]
             if shmem_enabled == 'y' and vm_i in shmem_num.keys():
                 shmem_num_i = shmem_num[vm_i]
             pci_dev_num_per_vm[vm_i] = shmem_num_i + vuart_num
-        elif "PRE_LAUNCHED_VM" == VM_DB[vm_type]['load_type']:
+        elif "PRE_LAUNCHED_VM" == load_order:
             shmem_num_i = 0
             if shmem_enabled == 'y' and vm_i in shmem_num.keys():
                 shmem_num_i = shmem_num[vm_i]
             pci_dev_num_per_vm[vm_i] = pt_pci_num[vm_i] + shmem_num_i + vuarts_num[vm_i]
-        elif "SERVICE_VM" == VM_DB[vm_type]['load_type']:
+        elif "SERVICE_VM" == load_order:
             shmem_num_i = 0
             if shmem_enabled == 'y' and vm_i in shmem_num.keys():
                 shmem_num_i = shmem_num[vm_i]
@@ -328,16 +328,16 @@ def vm_cpu_affinity_check(scenario_file, launch_file, cpu_affinity):
     sos_vm_cpus = []
     pre_launch_cpus = []
     post_launch_cpus = []
-    for vm_i, vm_type in common.VM_TYPES.items():
+    for vm_i, load_order in common.LOAD_ORDER.items():
         if vm_i not in cpu_affinity.keys():
             continue
-        elif VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
+        elif VM_DB[load_order]['load_type'] == "PRE_LAUNCHED_VM":
             cpus = [x for x in cpu_affinity[vm_i] if not None]
             pre_launch_cpus.extend(cpus)
-        elif VM_DB[vm_type]['load_type'] == "POST_LAUNCHED_VM":
+        elif VM_DB[load_order]['load_type'] == "POST_LAUNCHED_VM":
             cpus = [x for x in cpu_affinity[vm_i] if not None]
             post_launch_cpus.extend(cpus)
-        elif VM_DB[vm_type]['load_type'] == "SERVICE_VM":
+        elif VM_DB[load_order]['load_type'] == "SERVICE_VM":
             cpus = [x for x in cpu_affinity[vm_i] if not None]
             sos_vm_cpus.extend(cpus)
@@ -350,7 +350,7 @@ def vm_cpu_affinity_check(scenario_file, launch_file, cpu_affinity):
         return err_dic

     if pre_launch_cpus:
-        if "SERVICE_VM" in common.VM_TYPES and not sos_vm_cpus:
+        if "SERVICE_VM" in common.LOAD_ORDER and not sos_vm_cpus:
             key = "Service VM cpu_affinity"
             err_dic[key] = "Should assign CPU id for Service VM"
@@ -523,7 +523,7 @@ def get_vuart1_vmid(vm_vuart1):
     """
     vm_id_dic = {}
     new_vm_id_dic = {}
-    for i in list(common.VM_TYPES.keys()):
+    for i in list(common.LOAD_ORDER.keys()):
         for key in vm_vuart1[i].keys():
             if key == "target_vm_id":
                 vm_id_dic[i] = vm_vuart1[i][key]
@@ -547,16 +547,16 @@ def cpus_assignment(cpus_per_vm, index):
     :return: cpu assignment string
     """
     vm_cpu_bmp = {}
-    if "SERVICE_VM" == common.VM_TYPES[index]:
+    if "SERVICE_VM" == common.LOAD_ORDER[index]:
         if index not in cpus_per_vm or cpus_per_vm[index] == [None]:
             sos_extend_all_cpus = board_cfg_lib.get_processor_info()
             pre_all_cpus = []
             for vmid, cpu_list in cpus_per_vm.items():
-                if vmid in common.VM_TYPES:
-                    vm_type = common.VM_TYPES[vmid]
+                if vmid in common.LOAD_ORDER:
+                    load_order = common.LOAD_ORDER[vmid]
                     load_type = ''
-                    if vm_type in VM_DB:
-                        load_type = VM_DB[vm_type]['load_type']
+                    if load_order in VM_DB:
+                        load_type = VM_DB[load_order]['load_type']
                     if load_type == "PRE_LAUNCHED_VM":
                         pre_all_cpus += cpu_list
             cpus_per_vm[index] = list(set(sos_extend_all_cpus) - set(pre_all_cpus))
@@ -605,9 +605,9 @@ def clos_assignment(clos_per_vm, index):

 def avl_vuart_ui_select(scenario_info):
     tmp_vuart = {}
-    for vm_i,vm_type in common.VM_TYPES.items():
-        if "SERVICE_VM" == VM_DB[vm_type]['load_type']:
+    for vm_i,load_order in common.LOAD_ORDER.items():
+        if "SERVICE_VM" == VM_DB[load_order]['load_type']:
             key = "vm={},legacy_vuart=0,base".format(vm_i)
             tmp_vuart[key] = ['SERVICE_VM_COM1_BASE', 'INVALID_COM_BASE']
             key = "vm={},legacy_vuart=1,base".format(vm_i)
@@ -624,8 +624,8 @@ def get_first_post_vm():
     i = 0
-    for vm_i,vm_type in common.VM_TYPES.items():
-        if "POST_LAUNCHED_VM" == VM_DB[vm_type]['load_type']:
+    for vm_i,load_order in common.LOAD_ORDER.items():
+        if "POST_LAUNCHED_VM" == VM_DB[load_order]['load_type']:
             i = vm_i
             break
@@ -678,7 +678,7 @@ def check_vuart(v0_vuart, v1_vuart):
     target_id_keys = list(vm_target_id_dic.keys())
     i = 0
     for vm_i,t_vm_id in vm_target_id_dic.items():
-        if t_vm_id.isnumeric() and int(t_vm_id) not in common.VM_TYPES.keys():
+        if t_vm_id.isnumeric() and int(t_vm_id) not in common.LOAD_ORDER.keys():
             key = "vm:id={},legacy_vuart:id=1,target_vm_id".format(vm_i)
             ERR_LIST[key] = "target_vm_id which specified does not exist"
@@ -808,10 +808,10 @@ def get_target_vm_id(vuart, vm_id):
                 "target_vm_id should be present and numeric: {!r}".format(
                     target_vm_id_str))

-    if target_vm_id not in common.VM_TYPES:
+    if target_vm_id not in common.LOAD_ORDER:
         raise XmlError(
             'invalid target_vm_id: target_vm_id={!r}, vm_ids={}'.format(
-                target_vm_id, common.VM_TYPES.keys()))
+                target_vm_id, common.LOAD_ORDER.keys()))

     if target_vm_id == vm_id:
         raise XmlError(
@@ -950,7 +950,7 @@ def is_target_vm_available(target_vm_id, vm_visited, legacy_vuart1_visited):
         raise TypeError('legacy_vuart1_visited should be a dict: {}, {!r}' \
                         .format(type(legacy_vuart1_visited), legacy_vuart1_visited))

-    if target_vm_id not in common.VM_TYPES:
+    if target_vm_id not in common.LOAD_ORDER:
         raise TargetError("target vm {} is not present".format(target_vm_id))
     if target_vm_id in vm_visited:
         pass
@@ -1151,7 +1151,7 @@ def check_p2sb(enable_p2sb):
             ERR_LIST[key] = "Can only specify p2sb passthru for VM0"
             return

-    if p2sb and not VM_DB[common.VM_TYPES[0]]['load_type'] == "PRE_LAUNCHED_VM":
+    if p2sb and not VM_DB[common.LOAD_ORDER[0]]['load_type'] == "PRE_LAUNCHED_VM":
         ERR_LIST["vm:id=0,p2sb"] = "p2sb passthru can only be enabled for Pre-launched VM"
         return
@@ -1176,7 +1176,7 @@ def check_pt_intx(phys_gsi, virt_gsi):
         ERR_LIST["pt_intx"] = "only board ehl-crb-b/generic_board is supported"
         return

-    if not VM_DB[common.VM_TYPES[0]]['load_type'] == "PRE_LAUNCHED_VM":
+    if not VM_DB[common.LOAD_ORDER[0]]['load_type'] == "PRE_LAUNCHED_VM":
         ERR_LIST["pt_intx"] = "pt_intx can only be specified for pre-launched VM"
         return
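Note that several call sites above still index VM_DB with the load_order value
(VM_DB[load_order]['load_type']), so VM_DB presumably now carries entries keyed by the
three load orders. A hypothetical minimal shape that would satisfy those lookups:

    # Hypothetical; the real VM_DB in scenario_cfg_lib.py may carry more fields.
    VM_DB = {
        'PRE_LAUNCHED_VM':  {'load_type': 'PRE_LAUNCHED_VM'},
        'SERVICE_VM':       {'load_type': 'SERVICE_VM'},
        'POST_LAUNCHED_VM': {'load_type': 'POST_LAUNCHED_VM'},
    }
    assert VM_DB['PRE_LAUNCHED_VM']['load_type'] == 'PRE_LAUNCHED_VM'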
diff --git a/misc/config_tools/scenario_config/scenario_cfg_gen.py b/misc/config_tools/scenario_config/scenario_cfg_gen.py
index 0f277eabe..d25bce17b 100755
--- a/misc/config_tools/scenario_config/scenario_cfg_gen.py
+++ b/misc/config_tools/scenario_config/scenario_cfg_gen.py
@@ -45,7 +45,7 @@ def get_scenario_item_values(board_info, scenario_info):
     common.BOARD_INFO_FILE = board_info
     common.SCENARIO_INFO_FILE = scenario_info
     common.get_vm_num(scenario_info)
-    common.get_vm_types()
+    common.get_load_order()

     # per scenario
     guest_flags = copy.deepcopy(common.GUEST_FLAG)
@@ -199,7 +199,7 @@ def main(args):
     common.BOARD_INFO_FILE = params['--board']
     common.SCENARIO_INFO_FILE = params['--scenario']
     common.get_vm_num(params['--scenario'])
-    common.get_vm_types()
+    common.get_load_order()

     # get board name
     (err_dic, board_name) = common.get_board_name()
diff --git a/misc/config_tools/schema/VMtypes.xsd b/misc/config_tools/schema/VMtypes.xsd
index f995a5d69..2e6bd5366 100644
--- a/misc/config_tools/schema/VMtypes.xsd
+++ b/misc/config_tools/schema/VMtypes.xsd
@@ -3,24 +3,37 @@
     xmlns:xs="http://www.w3.org/2001/XMLSchema"
     xmlns:acrn="https://projectacrn.org">
 - +
 Current supported VM types are:

-- ``SAFETY_VM`` pre-launched Safety VM
-- ``PRE_STD_VM`` pre-launched Standard VM
-- ``PRE_RT_VM`` pre-launched real-time capable VM
-- ``SERVICE_VM`` pre-launched Service VM
-- ``POST_STD_VM`` post-launched Standard VM
-- ``POST_RT_VM`` post-launched real-time capable VM
+- ``PRE_LAUNCHED_VM`` pre-launched VM
+- ``SERVICE_VM`` Service VM
+- ``POST_LAUNCHED_VM`` post-launched VM
 - - - - - + + + + + + + +
+Current supported VM types are:
+
+- ``RTVM`` Real-time VM
+- ``STANDARD_VM`` Standard VM
+- ``TEE_VM`` VM with a Trusted Execution Environment, which provides a high level of
+  security for code execution
+- ``REE_VM`` VM with a Rich Execution Environment, a companion VM to the TEE_VM that
+  provides more features and applications but is more vulnerable to attack
 + + + + + +
diff --git a/misc/config_tools/schema/checks/pre_launched_vm_support.xsd b/misc/config_tools/schema/checks/pre_launched_vm_support.xsd
index cc9a9fbe3..712b92a0f 100644
--- a/misc/config_tools/schema/checks/pre_launched_vm_support.xsd
+++ b/misc/config_tools/schema/checks/pre_launched_vm_support.xsd
@@ -36,7 +36,7 @@
 or little cores are assigned, but not both.
 -
@@ -52,8 +52,8 @@
 https://github.com/projectacrn/acrn-hypervisor/issues if you meet this.
 - +
 There should not be both a pre-launched RTVM and a post-launched RTVM, and two or more
 pre-launched RTVMs are not allowed.
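The RTVM exclusivity rule asserted above can be expressed against a scenario tree with
the same XPath pair the Python generators use; a sketch with lxml (the fragment is
illustrative only):

    from lxml import etree

    root = etree.fromstring(
        "<acrn-config>"
        "<vm id='0'><load_order>PRE_LAUNCHED_VM</load_order><vm_type>RTVM</vm_type></vm>"
        "<vm id='1'><load_order>SERVICE_VM</load_order><vm_type>STANDARD_VM</vm_type></vm>"
        "</acrn-config>")
    pre_rt = root.xpath("//vm[load_order = 'PRE_LAUNCHED_VM' and vm_type = 'RTVM']")
    post_rt = root.xpath("//vm[load_order = 'POST_LAUNCHED_VM' and vm_type = 'RTVM']")
    # At most one pre-launched RTVM, and never together with a post-launched RTVM.
    assert len(pre_rt) <= 1 and not (pre_rt and post_rt)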
diff --git a/misc/config_tools/schema/config.xsd b/misc/config_tools/schema/config.xsd
index 2f8f5cfc4..3a58e7fde 100644
--- a/misc/config_tools/schema/config.xsd
+++ b/misc/config_tools/schema/config.xsd
@@ -25,7 +25,7 @@ hypervisor shell.
 Specify the host serial device used for hypervisor debugging.
 This option is only valid if :option:`hv.DEBUG_OPTIONS.RELEASE` is set to ``n``.

-This option impacts the content of ``vm.(legacy_vuart id="0").base`` when :option:`vm.vm_type` is ``SERVICE_VM``,
+This option impacts the content of ``vm.(legacy_vuart id="0").base`` when :option:`vm.load_order` is ``SERVICE_VM``,
 which specifies the PIO base for Service VM legacy vUART 0 (used for the console).
 The PIO base for the Service VM's legacy vUART 0 is determined using these rules:
@@ -303,11 +303,16 @@
 If this value is empty, then the default value will be calculated from informati
 - - + +
 Specify the VM type.
 + + +
+Specify the load_order.
 + +
 Specify the VM name shown in the
@@ -412,7 +417,7 @@
 its ``id`` attribute. When it is enabled, specify which target VM's vUART the cu
 - +
 Service VM cannot use LAPIC passthrough unless GUEST_FLAG_NVMX_ENABLED is set.
diff --git a/misc/config_tools/service_vm_config/serial_config.py b/misc/config_tools/service_vm_config/serial_config.py
index f4a286588..455c63d43 100644
--- a/misc/config_tools/service_vm_config/serial_config.py
+++ b/misc/config_tools/service_vm_config/serial_config.py
@@ -30,7 +30,7 @@ def main(args):
     allocation_etree = lxml.etree.parse(args.allocation)
     vuart_target_vmid = [0] * VUART_DEV_NAME_NUM

-    vm_list = scenario_etree.xpath("//vm[vm_type = 'SERVICE_VM']")
+    vm_list = scenario_etree.xpath("//vm[load_order = 'SERVICE_VM']")
     for vm in vm_list:
         for legacy_vuart in vm.iter(tag = 'legacy_vuart'):
             if legacy_vuart.find('target_vm_id') != None:
@@ -38,7 +38,7 @@
                 legacy_vuartid = int(legacy_vuart.attrib["id"])
                 vuart_target_vmid[legacy_vuartid] = user_vm_id

-    vm_list = allocation_etree.xpath("//vm[vm_type = 'SERVICE_VM']")
+    vm_list = allocation_etree.xpath("//vm[load_order = 'SERVICE_VM']")
     for vm in vm_list:
         vuart_list = find_non_standard_uart(vm)
         if len(vuart_list) != 0:
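The service VM lookup in serial_config.py reduces to a single XPath query; a minimal
sketch (lxml assumed, fragment illustrative):

    from lxml import etree

    scenario_etree = etree.fromstring(
        "<acrn-config><vm id='1'><load_order>SERVICE_VM</load_order></vm></acrn-config>")
    vm_list = scenario_etree.xpath("//vm[load_order = 'SERVICE_VM']")
    print([vm.get('id') for vm in vm_list])  # ['1']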
""" dev_list = [] - for vm_type in lib.lib.PRE_LAUNCHED_VMS_TYPE: - pt_devs = scenario_etree.xpath(f"//vm[vm_type = '{vm_type}']/pci_devs/pci_dev/text()") - for pt_dev in pt_devs: - bdf = lib.lib.BusDevFunc.from_str(pt_dev.split()[0]) - dev_list.append(bdf) + pt_devs = scenario_etree.xpath(f"//vm[load_order = 'PRE_LAUNCHED_VM']/pci_devs/pci_dev/text()") + for pt_dev in pt_devs: + bdf = lib.lib.BusDevFunc.from_str(pt_dev.split()[0]) + dev_list.append(bdf) return dev_list def create_device_node(allocation_etree, vm_id, devdict): @@ -141,11 +140,11 @@ def fn(board_etree, scenario_etree, allocation_etree): vm_id = vm_node.get('id') devdict = {} used = [] - vm_type = common.get_node("./vm_type/text()", vm_node) - if vm_type is not None and lib.lib.is_post_launched_vm(vm_type): + load_order = common.get_node("./load_order/text()", vm_node) + if load_order is not None and lib.lib.is_post_launched_vm(load_order): continue - if vm_type is not None and lib.lib.is_sos_vm(vm_type): + if load_order is not None and lib.lib.is_service_vm(load_order): native_used = get_devs_bdf_native(board_etree) passthrough_used = get_devs_bdf_passthrough(scenario_etree) used = [bdf for bdf in native_used if bdf not in passthrough_used] diff --git a/misc/config_tools/static_allocators/cpu_affinity.py b/misc/config_tools/static_allocators/cpu_affinity.py index 00b4f1486..66f003d37 100644 --- a/misc/config_tools/static_allocators/cpu_affinity.py +++ b/misc/config_tools/static_allocators/cpu_affinity.py @@ -10,14 +10,14 @@ sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', ' import common, board_cfg_lib def sos_cpu_affinity(etree): - if common.get_node("//vm[vm_type = 'SERVICE_VM']", etree) is None: + if common.get_node("//vm[load_order = 'SERVICE_VM']", etree) is None: return None - if common.get_node("//vm[vm_type = 'SERVICE_VM' and count(cpu_affinity)]", etree) is not None: + if common.get_node("//vm[load_order = 'SERVICE_VM' and count(cpu_affinity)]", etree) is not None: return None sos_extend_all_cpus = board_cfg_lib.get_processor_info() - pre_all_cpus = etree.xpath("//vm[vm_type = 'PRE_RT_VM' or vm_type = 'PRE_STD_VM' or vm_type = 'SAFETY_VM']/cpu_affinity/pcpu_id/text()") + pre_all_cpus = etree.xpath("//vm[load_order = 'PRE_LAUNCHED_VM']/cpu_affinity/pcpu_id/text()") cpus_for_sos = list(set(sos_extend_all_cpus) - set(pre_all_cpus)) return sorted(cpus_for_sos) @@ -25,12 +25,12 @@ def sos_cpu_affinity(etree): def fn(board_etree, scenario_etree, allocation_etree): cpus_for_sos = sos_cpu_affinity(scenario_etree) if cpus_for_sos: - if common.get_node("//vm[vm_type = 'SERVICE_VM']", scenario_etree) is not None: - vm_id = common.get_node("//vm[vm_type = 'SERVICE_VM']/@id", scenario_etree) + if common.get_node("//vm[load_order = 'SERVICE_VM']", scenario_etree) is not None: + vm_id = common.get_node("//vm[load_order = 'SERVICE_VM']/@id", scenario_etree) allocation_sos_vm_node = common.get_node(f"/acrn-config/vm[@id='{vm_id}']", allocation_etree) if allocation_sos_vm_node is None: allocation_sos_vm_node = common.append_node("/acrn-config/vm", None, allocation_etree, id = vm_id) - if common.get_node("./vm_type", allocation_sos_vm_node) is None: - common.append_node("./vm_type", "SERVICE_VM", allocation_sos_vm_node) + if common.get_node("./load_order", allocation_sos_vm_node) is None: + common.append_node("./load_order", "SERVICE_VM", allocation_sos_vm_node) for pcpu_id in cpus_for_sos: common.append_node("./cpu_affinity/pcpu_id", str(pcpu_id), allocation_sos_vm_node) diff --git 
diff --git a/misc/config_tools/static_allocators/gpa.py b/misc/config_tools/static_allocators/gpa.py
index dbcb71f25..3eb0f30db 100644
--- a/misc/config_tools/static_allocators/gpa.py
+++ b/misc/config_tools/static_allocators/gpa.py
@@ -252,19 +252,18 @@ def get_devs_mem_passthrough(board_etree, scenario_etree):
     return: list of passthrough devices' mmio windows.
     """
     dev_list = []
-    for vm_type in lib.lib.PRE_LAUNCHED_VMS_TYPE:
-        pt_devs = scenario_etree.xpath(f"//vm[vm_type = '{vm_type}']/pci_devs/pci_dev/text()")
-        for pt_dev in pt_devs:
-            bdf = pt_dev.split()[0]
-            bus = int(bdf.split(':')[0], 16)
-            dev = int(bdf.split(":")[1].split('.')[0], 16)
-            func = int(bdf.split(":")[1].split('.')[1], 16)
-            resources = board_etree.xpath(f"//bus[@address = '{hex(bus)}']/device[@address = '{hex((dev << 16) | func)}'] \
-                /resource[@type = 'memory' and @len != '0x0' and @width]")
-            for resource in resources:
-                start = resource.get('min')
-                end = resource.get('max')
-                dev_list.append(AddrWindow(int(start, 16), int(end, 16)))
+    pt_devs = scenario_etree.xpath(f"//vm[load_order = 'PRE_LAUNCHED_VM']/pci_devs/pci_dev/text()")
+    for pt_dev in pt_devs:
+        bdf = pt_dev.split()[0]
+        bus = int(bdf.split(':')[0], 16)
+        dev = int(bdf.split(":")[1].split('.')[0], 16)
+        func = int(bdf.split(":")[1].split('.')[1], 16)
+        resources = board_etree.xpath(f"//bus[@address = '{hex(bus)}']/device[@address = '{hex((dev << 16) | func)}'] \
+            /resource[@type = 'memory' and @len != '0x0' and @width]")
+        for resource in resources:
+            start = resource.get('min')
+            end = resource.get('max')
+            dev_list.append(AddrWindow(int(start, 16), int(end, 16)))
     return dev_list

 def get_pt_devs_io_port_passthrough_per_vm(board_etree, vm_node):
@@ -293,11 +292,10 @@ def get_pt_devs_io_port_passthrough(board_etree, scenario_etree):
     return: list of passthrough devices' io port addresses.
     """
     dev_list = []
-    for vm_type in lib.lib.PRE_LAUNCHED_VMS_TYPE:
-        vm_nodes = scenario_etree.xpath(f"//vm[vm_type = '{vm_type}']")
-        for vm_node in vm_nodes:
-            dev_list_per_vm = get_pt_devs_io_port_passthrough_per_vm(board_etree, vm_node)
-            dev_list = dev_list + dev_list_per_vm
+    vm_nodes = scenario_etree.xpath(f"//vm[load_order = 'PRE_LAUNCHED_VM']")
+    for vm_node in vm_nodes:
+        dev_list_per_vm = get_pt_devs_io_port_passthrough_per_vm(board_etree, vm_node)
+        dev_list = dev_list + dev_list_per_vm
     return dev_list

 def get_pci_hole_native(board_etree):
@@ -421,11 +419,11 @@ def allocate_pci_bar(board_etree, scenario_etree, allocation_etree):
         used_low_mem = []
         used_high_mem = []

-        vm_type = common.get_node("./vm_type/text()", vm_node)
-        if vm_type is not None and lib.lib.is_pre_launched_vm(vm_type):
+        load_order = common.get_node("./load_order/text()", vm_node)
+        if load_order is not None and lib.lib.is_pre_launched_vm(load_order):
             low_mem = [AddrWindow(start = PRE_LAUNCHED_VM_LOW_MEM_START, end = PRE_LAUNCHED_VM_LOW_MEM_END - 1)]
             high_mem = [AddrWindow(start = PRE_LAUNCHED_VM_HIGH_MEM_START, end = PRE_LAUNCHED_VM_HIGH_MEM_END - 1)]
-        elif vm_type is not None and lib.lib.is_sos_vm(vm_type):
+        elif load_order is not None and lib.lib.is_service_vm(load_order):
             low_mem = native_low_mem
             high_mem = native_high_mem
             mem_passthrough = get_devs_mem_passthrough(board_etree, scenario_etree)
@@ -435,7 +433,7 @@
             used_low_mem = [mem for mem in used_low_mem_native if mem not in mem_passthrough]
             used_high_mem = [mem for mem in used_high_mem_native if mem not in mem_passthrough]
         else:
-            # fall into else when the vm_type is post-launched vm, no mmio allocation is needed
+            # fall into else when the load_order is post-launched vm, no mmio allocation is needed
             continue

         devdict_base_32_bits = alloc_addr(low_mem, devdict_32bits, used_low_mem, VBAR_ALIGNMENT)
@@ -456,8 +454,8 @@ def allocate_io_port(board_etree, scenario_etree, allocation_etree):
         io_port_range_list = []
         used_io_port_list = []

-        vm_type = common.get_node("./vm_type/text()", vm_node)
-        if vm_type is not None and lib.lib.is_sos_vm(vm_type):
+        load_order = common.get_node("./load_order/text()", vm_node)
+        if load_order is not None and lib.lib.is_service_vm(load_order):
             io_port_range_list = io_port_range_list_native
             io_port_passthrough = get_pt_devs_io_port_passthrough(board_etree, scenario_etree)
             used_io_port_list_native = get_devs_io_port_native(board_etree, io_port_range_list_native)
@@ -474,7 +472,7 @@ def allocate_ssram_region(board_etree, scenario_etree, allocation_etree):
     # Guest physical address of the SW SRAM allocated to a pre-launched VM
     enabled = common.get_node("//SSRAM_ENABLED/text()", scenario_etree)
     if enabled == "y":
-        pre_rt_vms = common.get_node("//vm[vm_type ='PRE_RT_VM']", scenario_etree)
+        pre_rt_vms = common.get_node("//vm[load_order = 'PRE_LAUNCHED_VM' and vm_type = 'RTVM']", scenario_etree)
         if pre_rt_vms is not None:
             vm_id = pre_rt_vms.get("id")
             l3_sw_sram = board_etree.xpath("//cache[@level='3']/capability[@id='Software SRAM']")
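The BDF parsing in get_devs_mem_passthrough() splits a "bus:dev.func" string and packs
device and function into the address attribute used by the board XML query; a worked
example with a sample BDF:

    bdf = "00:1f.3"  # sample bus:device.function string
    bus = int(bdf.split(':')[0], 16)
    dev = int(bdf.split(':')[1].split('.')[0], 16)
    func = int(bdf.split(':')[1].split('.')[1], 16)
    print(hex(bus), hex((dev << 16) | func))  # 0x0 0x1f0003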
""" dev_list = [] - for vm_type in lib.lib.PRE_LAUNCHED_VMS_TYPE: - vm_nodes = scenario_etree.xpath(f"//vm[vm_type = '{vm_type}']") - for vm_node in vm_nodes: - dev_list_per_vm = get_pt_devs_io_port_passthrough_per_vm(board_etree, vm_node) - dev_list = dev_list + dev_list_per_vm + vm_nodes = scenario_etree.xpath(f"//vm[load_order = 'PRE_LAUNCHED_VM']") + for vm_node in vm_nodes: + dev_list_per_vm = get_pt_devs_io_port_passthrough_per_vm(board_etree, vm_node) + dev_list = dev_list + dev_list_per_vm return dev_list def get_pci_hole_native(board_etree): @@ -421,11 +419,11 @@ def allocate_pci_bar(board_etree, scenario_etree, allocation_etree): used_low_mem = [] used_high_mem = [] - vm_type = common.get_node("./vm_type/text()", vm_node) - if vm_type is not None and lib.lib.is_pre_launched_vm(vm_type): + load_order = common.get_node("./load_order/text()", vm_node) + if load_order is not None and lib.lib.is_pre_launched_vm(load_order): low_mem = [AddrWindow(start = PRE_LAUNCHED_VM_LOW_MEM_START, end = PRE_LAUNCHED_VM_LOW_MEM_END - 1)] high_mem = [AddrWindow(start = PRE_LAUNCHED_VM_HIGH_MEM_START, end = PRE_LAUNCHED_VM_HIGH_MEM_END - 1)] - elif vm_type is not None and lib.lib.is_sos_vm(vm_type): + elif load_order is not None and lib.lib.is_service_vm(load_order): low_mem = native_low_mem high_mem = native_high_mem mem_passthrough = get_devs_mem_passthrough(board_etree, scenario_etree) @@ -435,7 +433,7 @@ def allocate_pci_bar(board_etree, scenario_etree, allocation_etree): used_low_mem = [mem for mem in used_low_mem_native if mem not in mem_passthrough] used_high_mem = [mem for mem in used_high_mem_native if mem not in mem_passthrough] else: - # fall into else when the vm_type is post-launched vm, no mmio allocation is needed + # fall into else when the load_order is post-launched vm, no mmio allocation is needed continue devdict_base_32_bits = alloc_addr(low_mem, devdict_32bits, used_low_mem, VBAR_ALIGNMENT) @@ -456,8 +454,8 @@ def allocate_io_port(board_etree, scenario_etree, allocation_etree): io_port_range_list = [] used_io_port_list = [] - vm_type = common.get_node("./vm_type/text()", vm_node) - if vm_type is not None and lib.lib.is_sos_vm(vm_type): + load_order = common.get_node("./load_order/text()", vm_node) + if load_order is not None and lib.lib.is_service_vm(load_order): io_port_range_list = io_port_range_list_native io_port_passthrough = get_pt_devs_io_port_passthrough(board_etree, scenario_etree) used_io_port_list_native = get_devs_io_port_native(board_etree, io_port_range_list_native) @@ -474,7 +472,7 @@ def allocate_ssram_region(board_etree, scenario_etree, allocation_etree): # Guest physical address of the SW SRAM allocated to a pre-launched VM enabled = common.get_node("//SSRAM_ENABLED/text()", scenario_etree) if enabled == "y": - pre_rt_vms = common.get_node("//vm[vm_type ='PRE_RT_VM']", scenario_etree) + pre_rt_vms = common.get_node("//vm[load_order = 'PRE_LAUNCHED_VM' and vm_type = 'RTVM']", scenario_etree) if pre_rt_vms is not None: vm_id = pre_rt_vms.get("id") l3_sw_sram = board_etree.xpath("//cache[@level='3']/capability[@id='Software SRAM']") diff --git a/misc/config_tools/static_allocators/intx.py b/misc/config_tools/static_allocators/intx.py index 38792502f..23f922913 100644 --- a/misc/config_tools/static_allocators/intx.py +++ b/misc/config_tools/static_allocators/intx.py @@ -39,12 +39,12 @@ def remove_irq(irq_list, irq): except ValueError as e: raise ValueError("Cannot remove irq:{} from available irq list:{}, {}". 
diff --git a/misc/config_tools/static_allocators/lib/lib.py b/misc/config_tools/static_allocators/lib/lib.py
index 60c956767..acb7a8cfd 100644
--- a/misc/config_tools/static_allocators/lib/lib.py
+++ b/misc/config_tools/static_allocators/lib/lib.py
@@ -119,16 +119,16 @@ def get_ivshmem_enabled_by_tree(etree):
     return shmem_enabled

 def is_pre_launched_vm(vm_type):
-    if vm_type in PRE_LAUNCHED_VMS_TYPE:
+    if vm_type == 'PRE_LAUNCHED_VM':
         return True
     return False

 def is_post_launched_vm(vm_type):
-    if vm_type in POST_LAUNCHED_VMS_TYPE:
+    if vm_type == 'POST_LAUNCHED_VM':
         return True
     return False

-def is_sos_vm(vm_type):
-    if vm_type in SERVICE_VM_TYPE:
+def is_service_vm(vm_type):
+    if vm_type == 'SERVICE_VM':
         return True
     return False
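The predicates above compare load_order values with ==; identity comparison would hinge
on string interning, which is an implementation detail (CPython 3.8+ warns on "is" with
a literal). A quick demonstration:

    a = "".join(["PRE_LAUNCHED_", "VM"])  # built at runtime, so not interned
    b = "PRE_LAUNCHED_VM"
    print(a == b)  # True: value equality, which the predicates need
    print(a is b)  # False here: identity depends on interning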
diff --git a/misc/config_tools/static_allocators/main.py b/misc/config_tools/static_allocators/main.py
index 8a0f98441..f2550f194 100755
--- a/misc/config_tools/static_allocators/main.py
+++ b/misc/config_tools/static_allocators/main.py
@@ -17,7 +17,7 @@ def main(args):
     common.BOARD_INFO_FILE = args.board
     common.SCENARIO_INFO_FILE = args.scenario
     common.get_vm_num(args.scenario)
-    common.get_vm_types()
+    common.get_load_order()

     scripts_path = os.path.dirname(os.path.realpath(__file__))
     current = os.path.basename(__file__)
diff --git a/misc/config_tools/static_allocators/pio.py b/misc/config_tools/static_allocators/pio.py
index 6a4d8c9d4..c8ad24b38 100644
--- a/misc/config_tools/static_allocators/pio.py
+++ b/misc/config_tools/static_allocators/pio.py
@@ -52,11 +52,11 @@ def fn(board_etree, scenario_etree, allocation_etree):

     vm_node_list = scenario_etree.xpath("//vm")
     for vm_node in vm_node_list:
-        vm_type = common.get_node("./vm_type/text()", vm_node)
+        load_order = common.get_node("./load_order/text()", vm_node)
         legacy_vuart_base = ""
         legacy_vuart_id_list = vm_node.xpath("legacy_vuart[base != 'INVALID_COM_BASE']/@id")
         for legacy_vuart_id in legacy_vuart_id_list:
-            if legacy_vuart_id == '0' and vm_type == "SERVICE_VM":
+            if legacy_vuart_id == '0' and load_order == "SERVICE_VM":
                 if hv_debug_console in native_ttys.keys():
                     if native_ttys[hv_debug_console]['type'] == "portio":
                         legacy_vuart_base = native_ttys[hv_debug_console]['base']
diff --git a/misc/config_tools/xforms/lib.xsl b/misc/config_tools/xforms/lib.xsl
index aa399da4b..27bd7d174 100644
--- a/misc/config_tools/xforms/lib.xsl
+++ b/misc/config_tools/xforms/lib.xsl
@@ -319,7 +319,7 @@
 - +
@@ -353,7 +353,7 @@
 - +
@@ -455,49 +455,19 @@
 - - - - - - - - - - + + + - - - - - - - - - - - - - - - + + - - - - - - - - - - - - + +
diff --git a/misc/config_tools/xforms/misc_cfg.h.xsl b/misc/config_tools/xforms/misc_cfg.h.xsl
index bf28266d2..b5a6dc9c0 100644
--- a/misc/config_tools/xforms/misc_cfg.h.xsl
+++ b/misc/config_tools/xforms/misc_cfg.h.xsl
@@ -34,7 +34,7 @@
 - +
@@ -84,7 +84,7 @@
 - +
@@ -93,8 +93,8 @@
 - - + +
@@ -118,8 +118,8 @@
 - - + +
@@ -177,7 +177,7 @@
 - +
@@ -203,7 +203,7 @@
 - +
diff --git a/misc/config_tools/xforms/pci_dev.c.xsl b/misc/config_tools/xforms/pci_dev.c.xsl
index 8397962b5..250fe74b4 100644
--- a/misc/config_tools/xforms/pci_dev.c.xsl
+++ b/misc/config_tools/xforms/pci_dev.c.xsl
@@ -33,7 +33,7 @@
 - +
@@ -41,18 +41,18 @@
 - + - + - +
@@ -77,7 +77,7 @@
 - +
@@ -103,7 +103,7 @@
 - +
@@ -142,14 +142,14 @@
 - + { - +
diff --git a/misc/config_tools/xforms/pt_intx.c.xsl b/misc/config_tools/xforms/pt_intx.c.xsl
index 5afad4945..465f72a91 100644
--- a/misc/config_tools/xforms/pt_intx.c.xsl
+++ b/misc/config_tools/xforms/pt_intx.c.xsl
@@ -42,7 +42,7 @@
 - +
diff --git a/misc/config_tools/xforms/vm_configurations.c.xsl b/misc/config_tools/xforms/vm_configurations.c.xsl
index 30fe18d03..a6c642788 100644
--- a/misc/config_tools/xforms/vm_configurations.c.xsl
+++ b/misc/config_tools/xforms/vm_configurations.c.xsl
@@ -29,7 +29,7 @@
 - +
@@ -38,7 +38,7 @@
 - +
@@ -83,9 +83,9 @@
 - + - +
@@ -105,7 +105,7 @@
 - +
@@ -117,9 +117,25 @@
 - - - , + + + + + + + + + + + + + + + + + + +
@@ -129,7 +145,7 @@
 - +
@@ -178,7 +194,7 @@
 - +
@@ -216,10 +232,10 @@
 - + - +
@@ -229,7 +245,7 @@
 - +
 },
@@ -258,7 +274,7 @@
 - +
@@ -271,7 +287,7 @@
 - +
diff --git a/misc/config_tools/xforms/vm_configurations.h.xsl b/misc/config_tools/xforms/vm_configurations.h.xsl
index 04900da5f..b48fb8368 100644
--- a/misc/config_tools/xforms/vm_configurations.h.xsl
+++ b/misc/config_tools/xforms/vm_configurations.h.xsl
@@ -38,15 +38,15 @@
 - - - + + + - - + +
@@ -54,7 +54,7 @@
 - +