mirror of https://github.com/projectacrn/acrn-hypervisor.git
config tool: add load_order and redefine vm_type
This patch includes:
1. Add the load_order parameter (PRE_LAUNCHED_VM / SERVICE_VM / POST_LAUNCHED_VM).
2. Change the vm_type parameter to the values RTVM, STANDARD_VM, TEE and REE; TEE and REE are hidden in the UI.
3. Deduce VM severity in vm_configuration from vm_type and load_order.

This patch does not include:
changes to scenario_config and the checking functions called by scenario_config.

v2->v3:
*Refine template load_order

v1->v2:
*Change variable name from vm_type to load_order
*Change LoadOptionType to LoadOrderType
*Change VMOptionsType to VMType
*Add TEE_VM/REE_VM description
*Refine acrn:is-pre-launched-vm

Tracked-On: #6690
Signed-off-by: hangliu1 <hang1.liu@linux.intel.com>
Reviewed-by: Junjie Mao <junjie.mao@intel.com>
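Point 3 is the behavioral core of the change: severity is no longer stored directly but derived from the (load_order, vm_type) pair. A minimal sketch of such a derivation follows, with the caveat that the SEVERITY_* names and their precedence are illustrative assumptions rather than values taken from this patch:

def deduce_severity(load_order: str, vm_type: str) -> str:
    # Hedged sketch only: the authoritative table lives in the config
    # tool's vm_configuration logic; these labels are assumptions.
    if vm_type == "TEE":                   # trusted side, assumed highest
        return "SEVERITY_SAFETY_VM"
    if load_order == "SERVICE_VM":
        return "SEVERITY_SERVICE_VM"
    if vm_type == "RTVM":
        return "SEVERITY_RTVM"
    return "SEVERITY_STANDARD_VM"

assert deduce_severity("PRE_LAUNCHED_VM", "RTVM") == "SEVERITY_RTVM"
assert deduce_severity("POST_LAUNCHED_VM", "STANDARD_VM") == "SEVERITY_STANDARD_VM"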
@@ -95,11 +95,10 @@ def get_devs_bdf_passthrough(scenario_etree):
         return: list of passthrough devices' bdf.
     """
     dev_list = []
-    for vm_type in lib.lib.PRE_LAUNCHED_VMS_TYPE:
-        pt_devs = scenario_etree.xpath(f"//vm[vm_type = '{vm_type}']/pci_devs/pci_dev/text()")
-        for pt_dev in pt_devs:
-            bdf = lib.lib.BusDevFunc.from_str(pt_dev.split()[0])
-            dev_list.append(bdf)
+    pt_devs = scenario_etree.xpath(f"//vm[load_order = 'PRE_LAUNCHED_VM']/pci_devs/pci_dev/text()")
+    for pt_dev in pt_devs:
+        bdf = lib.lib.BusDevFunc.from_str(pt_dev.split()[0])
+        dev_list.append(bdf)
     return dev_list

 def create_device_node(allocation_etree, vm_id, devdict):
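The net effect of the hunk above: one query keyed on load_order replaces the loop over concrete pre-launched vm_type values. A self-contained sketch of the new-style query against a toy scenario tree (all sample values invented):

from lxml import etree

scenario = etree.fromstring("""
<acrn-config>
  <vm id="0">
    <load_order>PRE_LAUNCHED_VM</load_order>
    <vm_type>RTVM</vm_type>
    <pci_devs><pci_dev>00:17.0 SATA controller</pci_dev></pci_devs>
  </vm>
  <vm id="1">
    <load_order>SERVICE_VM</load_order>
    <vm_type>STANDARD_VM</vm_type>
  </vm>
</acrn-config>
""")

# One XPath keyed on load_order now finds every pre-launched VM's
# passthrough devices, regardless of its (redefined) vm_type.
pt_devs = scenario.xpath("//vm[load_order = 'PRE_LAUNCHED_VM']/pci_devs/pci_dev/text()")
print(pt_devs)   # ['00:17.0 SATA controller']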
@@ -141,11 +140,11 @@ def fn(board_etree, scenario_etree, allocation_etree):
         vm_id = vm_node.get('id')
         devdict = {}
         used = []
-        vm_type = common.get_node("./vm_type/text()", vm_node)
-        if vm_type is not None and lib.lib.is_post_launched_vm(vm_type):
+        load_order = common.get_node("./load_order/text()", vm_node)
+        if load_order is not None and lib.lib.is_post_launched_vm(load_order):
             continue

-        if vm_type is not None and lib.lib.is_sos_vm(vm_type):
+        if load_order is not None and lib.lib.is_service_vm(load_order):
             native_used = get_devs_bdf_native(board_etree)
             passthrough_used = get_devs_bdf_passthrough(scenario_etree)
             used = [bdf for bdf in native_used if bdf not in passthrough_used]

@@ -10,14 +10,14 @@ sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '
 import common, board_cfg_lib

 def sos_cpu_affinity(etree):
-    if common.get_node("//vm[vm_type = 'SERVICE_VM']", etree) is None:
+    if common.get_node("//vm[load_order = 'SERVICE_VM']", etree) is None:
         return None

-    if common.get_node("//vm[vm_type = 'SERVICE_VM' and count(cpu_affinity)]", etree) is not None:
+    if common.get_node("//vm[load_order = 'SERVICE_VM' and count(cpu_affinity)]", etree) is not None:
         return None

     sos_extend_all_cpus = board_cfg_lib.get_processor_info()
-    pre_all_cpus = etree.xpath("//vm[vm_type = 'PRE_RT_VM' or vm_type = 'PRE_STD_VM' or vm_type = 'SAFETY_VM']/cpu_affinity/pcpu_id/text()")
+    pre_all_cpus = etree.xpath("//vm[load_order = 'PRE_LAUNCHED_VM']/cpu_affinity/pcpu_id/text()")

     cpus_for_sos = list(set(sos_extend_all_cpus) - set(pre_all_cpus))
     return sorted(cpus_for_sos)
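sos_cpu_affinity then reduces to plain set arithmetic: the Service VM gets every processor the board reports minus the pcpus pinned to pre-launched VMs. A toy illustration (cpu ids invented):

# pcpus reported by the board vs. pcpus claimed by pre-launched VMs
# (both come back as lists of strings from the XML trees).
sos_extend_all_cpus = ["0", "1", "2", "3", "4", "5", "6", "7"]
pre_all_cpus = ["0", "1"]    # //vm[load_order = 'PRE_LAUNCHED_VM']/cpu_affinity/pcpu_id

cpus_for_sos = sorted(set(sos_extend_all_cpus) - set(pre_all_cpus))
print(cpus_for_sos)          # ['2', '3', '4', '5', '6', '7']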
@@ -25,12 +25,12 @@ def sos_cpu_affinity(etree):
 def fn(board_etree, scenario_etree, allocation_etree):
     cpus_for_sos = sos_cpu_affinity(scenario_etree)
     if cpus_for_sos:
-        if common.get_node("//vm[vm_type = 'SERVICE_VM']", scenario_etree) is not None:
-            vm_id = common.get_node("//vm[vm_type = 'SERVICE_VM']/@id", scenario_etree)
+        if common.get_node("//vm[load_order = 'SERVICE_VM']", scenario_etree) is not None:
+            vm_id = common.get_node("//vm[load_order = 'SERVICE_VM']/@id", scenario_etree)
             allocation_sos_vm_node = common.get_node(f"/acrn-config/vm[@id='{vm_id}']", allocation_etree)
             if allocation_sos_vm_node is None:
                 allocation_sos_vm_node = common.append_node("/acrn-config/vm", None, allocation_etree, id = vm_id)
-            if common.get_node("./vm_type", allocation_sos_vm_node) is None:
-                common.append_node("./vm_type", "SERVICE_VM", allocation_sos_vm_node)
+            if common.get_node("./load_order", allocation_sos_vm_node) is None:
+                common.append_node("./load_order", "SERVICE_VM", allocation_sos_vm_node)
             for pcpu_id in cpus_for_sos:
                 common.append_node("./cpu_affinity/pcpu_id", str(pcpu_id), allocation_sos_vm_node)

@@ -252,19 +252,18 @@ def get_devs_mem_passthrough(board_etree, scenario_etree):
         return: list of passthrough devices' mmio windows.
     """
     dev_list = []
-    for vm_type in lib.lib.PRE_LAUNCHED_VMS_TYPE:
-        pt_devs = scenario_etree.xpath(f"//vm[vm_type = '{vm_type}']/pci_devs/pci_dev/text()")
-        for pt_dev in pt_devs:
-            bdf = pt_dev.split()[0]
-            bus = int(bdf.split(':')[0], 16)
-            dev = int(bdf.split(":")[1].split('.')[0], 16)
-            func = int(bdf.split(":")[1].split('.')[1], 16)
-            resources = board_etree.xpath(f"//bus[@address = '{hex(bus)}']/device[@address = '{hex((dev << 16) | func)}'] \
-                        /resource[@type = 'memory' and @len != '0x0' and @width]")
-            for resource in resources:
-                start = resource.get('min')
-                end = resource.get('max')
-                dev_list.append(AddrWindow(int(start, 16), int(end, 16)))
+    pt_devs = scenario_etree.xpath(f"//vm[load_order = 'PRE_LAUNCHED_VM']/pci_devs/pci_dev/text()")
+    for pt_dev in pt_devs:
+        bdf = pt_dev.split()[0]
+        bus = int(bdf.split(':')[0], 16)
+        dev = int(bdf.split(":")[1].split('.')[0], 16)
+        func = int(bdf.split(":")[1].split('.')[1], 16)
+        resources = board_etree.xpath(f"//bus[@address = '{hex(bus)}']/device[@address = '{hex((dev << 16) | func)}'] \
+                    /resource[@type = 'memory' and @len != '0x0' and @width]")
+        for resource in resources:
+            start = resource.get('min')
+            end = resource.get('max')
+            dev_list.append(AddrWindow(int(start, 16), int(end, 16)))
     return dev_list

 def get_pt_devs_io_port_passthrough_per_vm(board_etree, vm_node):
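Both versions of the loop body parse a BDF string such as "00:17.0" (hexadecimal bus:device.function) and fold device and function into the @address encoding the board XML uses. The arithmetic in isolation:

def parse_bdf(bdf: str):
    # 'bus:dev.func', each field hexadecimal
    bus = int(bdf.split(':')[0], 16)
    dev = int(bdf.split(':')[1].split('.')[0], 16)
    func = int(bdf.split(':')[1].split('.')[1], 16)
    return bus, dev, func

bus, dev, func = parse_bdf("00:17.0")
# The board XML keys a device under its bus node by (dev << 16) | func.
print(hex(bus), hex((dev << 16) | func))   # 0x0 0x170000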
@@ -293,11 +292,10 @@ def get_pt_devs_io_port_passthrough(board_etree, scenario_etree):
         return: list of passthrough devices' io port addresses.
     """
     dev_list = []
-    for vm_type in lib.lib.PRE_LAUNCHED_VMS_TYPE:
-        vm_nodes = scenario_etree.xpath(f"//vm[vm_type = '{vm_type}']")
-        for vm_node in vm_nodes:
-            dev_list_per_vm = get_pt_devs_io_port_passthrough_per_vm(board_etree, vm_node)
-            dev_list = dev_list + dev_list_per_vm
+    vm_nodes = scenario_etree.xpath(f"//vm[load_order = 'PRE_LAUNCHED_VM']")
+    for vm_node in vm_nodes:
+        dev_list_per_vm = get_pt_devs_io_port_passthrough_per_vm(board_etree, vm_node)
+        dev_list = dev_list + dev_list_per_vm
     return dev_list

 def get_pci_hole_native(board_etree):
@@ -421,11 +419,11 @@ def allocate_pci_bar(board_etree, scenario_etree, allocation_etree):
         used_low_mem = []
         used_high_mem = []

-        vm_type = common.get_node("./vm_type/text()", vm_node)
-        if vm_type is not None and lib.lib.is_pre_launched_vm(vm_type):
+        load_order = common.get_node("./load_order/text()", vm_node)
+        if load_order is not None and lib.lib.is_pre_launched_vm(load_order):
             low_mem = [AddrWindow(start = PRE_LAUNCHED_VM_LOW_MEM_START, end = PRE_LAUNCHED_VM_LOW_MEM_END - 1)]
             high_mem = [AddrWindow(start = PRE_LAUNCHED_VM_HIGH_MEM_START, end = PRE_LAUNCHED_VM_HIGH_MEM_END - 1)]
-        elif vm_type is not None and lib.lib.is_sos_vm(vm_type):
+        elif load_order is not None and lib.lib.is_service_vm(load_order):
             low_mem = native_low_mem
             high_mem = native_high_mem
             mem_passthrough = get_devs_mem_passthrough(board_etree, scenario_etree)
@@ -435,7 +433,7 @@ def allocate_pci_bar(board_etree, scenario_etree, allocation_etree):
             used_low_mem = [mem for mem in used_low_mem_native if mem not in mem_passthrough]
             used_high_mem = [mem for mem in used_high_mem_native if mem not in mem_passthrough]
         else:
-            # fall into else when the vm_type is post-launched vm, no mmio allocation is needed
+            # fall into else when the load_order is post-launched vm, no mmio allocation is needed
             continue

         devdict_base_32_bits = alloc_addr(low_mem, devdict_32bits, used_low_mem, VBAR_ALIGNMENT)
@@ -456,8 +454,8 @@ def allocate_io_port(board_etree, scenario_etree, allocation_etree):
         io_port_range_list = []
         used_io_port_list = []

-        vm_type = common.get_node("./vm_type/text()", vm_node)
-        if vm_type is not None and lib.lib.is_sos_vm(vm_type):
+        load_order = common.get_node("./load_order/text()", vm_node)
+        if load_order is not None and lib.lib.is_service_vm(load_order):
             io_port_range_list = io_port_range_list_native
             io_port_passthrough = get_pt_devs_io_port_passthrough(board_etree, scenario_etree)
             used_io_port_list_native = get_devs_io_port_native(board_etree, io_port_range_list_native)
@@ -474,7 +472,7 @@ def allocate_ssram_region(board_etree, scenario_etree, allocation_etree):
     # Guest physical address of the SW SRAM allocated to a pre-launched VM
     enabled = common.get_node("//SSRAM_ENABLED/text()", scenario_etree)
     if enabled == "y":
-        pre_rt_vms = common.get_node("//vm[vm_type ='PRE_RT_VM']", scenario_etree)
+        pre_rt_vms = common.get_node("//vm[load_order = 'PRE_LAUNCHED_VM' and vm_type = 'RTVM']", scenario_etree)
         if pre_rt_vms is not None:
             vm_id = pre_rt_vms.get("id")
             l3_sw_sram = board_etree.xpath("//cache[@level='3']/capability[@id='Software SRAM']")
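This hunk shows the redefinition most directly: the old single value PRE_RT_VM becomes a conjunction of load_order and vm_type. A toy query against the same kind of tree (sample values invented):

from lxml import etree

scenario = etree.fromstring("""
<acrn-config>
  <vm id="0">
    <load_order>PRE_LAUNCHED_VM</load_order>
    <vm_type>RTVM</vm_type>
  </vm>
</acrn-config>
""")

# Old: //vm[vm_type = 'PRE_RT_VM']  -- one enum value carried both facts.
# New: the same VM is matched by combining the two orthogonal fields.
nodes = scenario.xpath("//vm[load_order = 'PRE_LAUNCHED_VM' and vm_type = 'RTVM']")
print([n.get("id") for n in nodes])   # ['0']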
@@ -39,12 +39,12 @@ def remove_irq(irq_list, irq):
     except ValueError as e:
         raise ValueError("Cannot remove irq:{} from available irq list:{}, {}".format(irq, e, irq_list)) from e

-def create_vuart_irq_node(etree, vm_id, vm_type, vuart_id, irq):
+def create_vuart_irq_node(etree, vm_id, load_order, vuart_id, irq):
     allocation_vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", etree)
     if allocation_vm_node is None:
         allocation_vm_node = common.append_node("/acrn-config/vm", None, etree, id = vm_id)
-    if common.get_node("./vm_type", allocation_vm_node) is None:
-        common.append_node("./vm_type", vm_type, allocation_vm_node)
+    if common.get_node("./load_order", allocation_vm_node) is None:
+        common.append_node("./load_order", load_order, allocation_vm_node)
     if common.get_node(f"./legacy_vuart[@id = '{vuart_id}']", allocation_vm_node) is None:
         common.append_node("./legacy_vuart", None, allocation_vm_node, id = vuart_id)

@@ -75,12 +75,12 @@ def alloc_legacy_vuart_irqs(board_etree, scenario_etree, allocation_etree):

     vm_node_list = scenario_etree.xpath("//vm")
     for vm_node in vm_node_list:
-        vm_type = common.get_node("./vm_type/text()", vm_node)
-        irq_list = get_native_valid_irq() if vm_type == "SERVICE_VM" else [f"{d}" for d in list(range(1,15))]
+        load_order = common.get_node("./load_order/text()", vm_node)
+        irq_list = get_native_valid_irq() if load_order == "SERVICE_VM" else [f"{d}" for d in list(range(1,15))]
         legacy_vuart_id_list = vm_node.xpath("legacy_vuart[base != 'INVALID_COM_BASE']/@id")
         legacy_vuart_irq = ''
         for legacy_vuart_id in legacy_vuart_id_list:
-            if legacy_vuart_id == '0' and vm_type == "SERVICE_VM":
+            if legacy_vuart_id == '0' and load_order == "SERVICE_VM":
                 if hv_debug_console in native_ttys.keys():
                     if native_ttys[hv_debug_console]['irq'] < LEGACY_IRQ_MAX:
                         legacy_vuart_irq = native_ttys[hv_debug_console]['irq']
@@ -93,7 +93,7 @@ def alloc_legacy_vuart_irqs(board_etree, scenario_etree, allocation_etree):
             else:
                 legacy_vuart_irq = assign_legacy_vuart_irqs(vm_node, legacy_vuart_id, irq_list)

-        create_vuart_irq_node(allocation_etree, common.get_node("./@id", vm_node), vm_type, legacy_vuart_id, legacy_vuart_irq)
+        create_vuart_irq_node(allocation_etree, common.get_node("./@id", vm_node), load_order, legacy_vuart_id, legacy_vuart_irq)

 def get_irqs_of_device(device_node):
     irqs = set()
@@ -131,9 +131,9 @@ def alloc_device_irqs(board_etree, scenario_etree, allocation_etree):
     # Identify the interrupt lines each pre-launched VM uses
     #
     for vm in scenario_etree.xpath("//vm"):
-        vm_type = vm.find("vm_type").text
+        load_order = vm.find("load_order").text
         vm_id = int(vm.get("id"))
-        if lib.lib.is_pre_launched_vm(vm_type):
+        if lib.lib.is_pre_launched_vm(load_order):
             pt_intx_text = common.get_node("pt_intx/text()", vm)
             if pt_intx_text is not None:
                 pt_intx_mapping = dict(eval(f"[{pt_intx_text.replace(')(', '), (')}]"))
@@ -157,7 +157,7 @@ def alloc_device_irqs(board_etree, scenario_etree, allocation_etree):
                 for device in devices:
                     print(f"\t{device}")
                 raise lib.error.ResourceError(f"Pre-launched VM {vm_id} with LAPIC_PASSTHROUGH flag cannot use interrupt lines.")
-        elif lib.lib.is_sos_vm(vm_type):
+        elif lib.lib.is_service_vm(load_order):
            service_vm_id = vm_id

    #
|
||||
@@ -119,16 +119,16 @@ def get_ivshmem_enabled_by_tree(etree):
|
||||
return shmem_enabled
|
||||
|
||||
def is_pre_launched_vm(vm_type):
|
||||
if vm_type in PRE_LAUNCHED_VMS_TYPE:
|
||||
if vm_type is 'PRE_LAUNCHED_VM':
|
||||
return True
|
||||
return False
|
||||
|
||||
def is_post_launched_vm(vm_type):
|
||||
if vm_type in POST_LAUNCHED_VMS_TYPE:
|
||||
if vm_type is 'POST_LAUNCHED_VM':
|
||||
return True
|
||||
return False
|
||||
|
||||
def is_sos_vm(vm_type):
|
||||
if vm_type in SERVICE_VM_TYPE:
|
||||
def is_service_vm(vm_type):
|
||||
if vm_type is 'SERVICE_VM':
|
||||
return True
|
||||
return False
|
||||
|
||||
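Note that the rewritten predicates compare with ==. Identity comparison (is) against a string literal only works by accident of CPython interning and fails for equal strings built at runtime:

a = 'PRE_LAUNCHED_VM'
b = '_'.join(['PRE', 'LAUNCHED', 'VM'])   # equal text, distinct object
print(a == b)   # True  -- the test the predicates need
print(a is b)   # False -- identity would misclassify this VM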
@@ -17,7 +17,7 @@ def main(args):
     common.BOARD_INFO_FILE = args.board
     common.SCENARIO_INFO_FILE = args.scenario
     common.get_vm_num(args.scenario)
-    common.get_vm_types()
+    common.get_load_order()

     scripts_path = os.path.dirname(os.path.realpath(__file__))
     current = os.path.basename(__file__)
@@ -52,11 +52,11 @@ def fn(board_etree, scenario_etree, allocation_etree):

     vm_node_list = scenario_etree.xpath("//vm")
     for vm_node in vm_node_list:
-        vm_type = common.get_node("./vm_type/text()", vm_node)
+        load_order = common.get_node("./load_order/text()", vm_node)
         legacy_vuart_base = ""
         legacy_vuart_id_list = vm_node.xpath("legacy_vuart[base != 'INVALID_COM_BASE']/@id")
         for legacy_vuart_id in legacy_vuart_id_list:
-            if legacy_vuart_id == '0' and vm_type == "SERVICE_VM":
+            if legacy_vuart_id == '0' and load_order == "SERVICE_VM":
                 if hv_debug_console in native_ttys.keys():
                     if native_ttys[hv_debug_console]['type'] == "portio":
                         legacy_vuart_base = native_ttys[hv_debug_console]['base']