misc: add vUART channel for s5 feature
"S5" is one of the ACPI sleep states which means the function to shut down the VMs. On ACRN, the User VM must be shut down before powering off the Service VM, so we need a vUART channel to communicate between the Service VM and User VMs. This patch adds a vUART channel for each User VM connect to Service VM Tracked-On: #8782 Signed-off-by: Chenli Wei <chenli.wei@intel.com> Reviewed-by: Junjie Mao <junjie.mao@intel.com>
parent d625ce0677
commit 2045577f12
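For context on how such a channel is consumed: once the vUART shows up as a serial device in each VM, a lifecycle-manager-style daemon can exchange shutdown requests over it like any other serial port. A minimal sketch using pyserial follows; the device node, baud rate, and command string are assumptions for illustration, not the actual life_mngr protocol:

    import serial  # pyserial

    # Service VM side: ask a User VM to shut down over its S5 vUART.
    # "/dev/ttyS8" and the "shutdown" command are hypothetical placeholders.
    with serial.Serial("/dev/ttyS8", baudrate=115200, timeout=5) as s5_chan:
        s5_chan.write(b"shutdown\n")
        ack = s5_chan.readline()  # wait for the User VM's acknowledgement
        print("User VM replied:", ack)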
@@ -17,7 +17,7 @@ VUART_DEV_NAME_NUM = 8
 stadard_uart_port = {'0x3F8', '0x2F8', '0x3E8', '0x2E8'}
 UART_IRQ_BAUD = " irq 0 uart 16550A baud_base 115200"
 
-def find_non_standard_uart(vm, scenario_etree):
+def find_non_standard_uart(vm, scenario_etree, allocation_etree):
     uart_list = []
     vmname = common.get_node("./name/text()", vm)
 
@@ -30,7 +30,17 @@ def find_non_standard_uart(vm, scenario_etree):
 
         port = common.get_node(f".//endpoint[vm_name = '{vmname}']/io_port/text()", connection)
         if port not in stadard_uart_port:
-            uart_list.append(connection)
+            target_vm_name = common.get_node(f".//endpoint[vm_name != '{vmname}']/vm_name/text()", connection)
+            target_vm_id = common.get_node(f"//vm[name = '{target_vm_name}']/@id", scenario_etree)
+            uart_list.append({"io_port" : port, "target_vm_id" : target_vm_id})
+
+    legacy_uart_list = allocation_etree.xpath(f"//vm[load_order = 'SERVICE_VM']/legacy_vuart")
+    for legacy_uart in legacy_uart_list:
+        port = common.get_node(f"./addr.port_base/text()", legacy_uart)
+        if port is None:
+            continue
+        elif port not in stadard_uart_port:
+            uart_list.append({"io_port" : port, "target_vm_id": common.get_node(f"./t_vuart.vm_id/text()", legacy_uart)})
 
     return uart_list
 
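After this change, find_non_standard_uart returns a uniform list of dicts, covering both scenario vuart_connection endpoints and the Service VM's allocated legacy_vuart nodes, instead of raw connection elements. With hypothetical values, the shape is:

    # Illustrative return value: one entry per non-standard vUART.
    uart_list = [
        {"io_port": "0X9000", "target_vm_id": "1"},
        {"io_port": "0X9008", "target_vm_id": "2"},
    ]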
@@ -45,23 +55,13 @@ def main(args):
 
-    vm_list = scenario_etree.xpath("//vm[load_order = 'SERVICE_VM']")
-    for vm in vm_list:
-        vmname = common.get_node("./name/text()", vm)
-        for connection in scenario_etree.xpath(f"//vuart_connection[endpoint/vm_name = '{vmname}']"):
-            vm_name = common.get_node(f".//endpoint[vm_name != '{vmname}']/vm_name/text()", connection)
-            for target_vm in scenario_etree.xpath(f"//vm[name = '{vm_name}']"):
-                vuart_target_vmid[connection.find('name').text] = target_vm.attrib["id"]
-
     vm_list = scenario_etree.xpath("//vm[load_order = 'SERVICE_VM']")
     for vm in vm_list:
-        vuart_list = find_non_standard_uart(vm, scenario_etree)
+        vuart_list = find_non_standard_uart(vm, scenario_etree, allocation_etree)
         vmname = common.get_node("./name/text()", vm)
         if len(vuart_list) != 0:
             with open(args.out, "w+") as config_f:
                 for uart_start_num, vuart in enumerate(vuart_list, start=START_VUART_DEV_NAME_NO):
-                    port = common.get_node(f".//endpoint[vm_name = '{vmname}']/io_port/text()", vuart)
-                    base = " port " + str(port)
-                    connection_name = vuart.find('name').text
-                    vm_id_note = "# User_VM_id: " + str(vuart_target_vmid[connection_name]) + '\n'
+                    base = " port " + vuart["io_port"]
+                    vm_id_note = "# User_VM_id: " + str(vuart["target_vm_id"]) + '\n'
                     config_f.write(vm_id_note)
                     conf = "/dev/ttyS" + str(uart_start_num) + base + UART_IRQ_BAUD + '\n'
                     config_f.write(conf)
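The rewritten loop in main() then needs no per-connection XML lookups: it reads io_port and target_vm_id straight from each dict. Assuming START_VUART_DEV_NAME_NO is 8, the generated config would contain one comment/line pair per vUART, along the lines of:

    # User_VM_id: 1
    /dev/ttyS8 port 0X9000 irq 0 uart 16550A baud_base 115200
    # User_VM_id: 2
    /dev/ttyS9 port 0X9008 irq 0 uart 16550A baud_base 115200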
@@ -55,11 +55,30 @@ def alloc_vuart_connection_irqs(board_etree, scenario_etree, allocation_etree):
     hv_debug_console = lib.lib.parse_hv_console(scenario_etree)
 
     vm_node_list = scenario_etree.xpath("//vm")
+    user_vm_list = scenario_etree.xpath("//vm[load_order != 'SERVICE_VM']/name/text()")
+    service_vm_id = common.get_node(f"//vm[load_order = 'SERVICE_VM']/@id", scenario_etree)
+    if service_vm_id is not None:
+        for index in range(0, len(user_vm_list)):
+            vuart_id = index + 1
+            create_vuart_irq_node(allocation_etree, service_vm_id, "SERVICE_VM", str(vuart_id), "0")
+
     for vm_node in vm_node_list:
         load_order = common.get_node("./load_order/text()", vm_node)
         irq_list = get_native_valid_irq() if load_order == "SERVICE_VM" else [f"{d}" for d in list(range(1,15))]
-        vuart_id = '1'
+
+        if load_order != "SERVICE_VM":
+            vuart_id = 1
+        else:
+            vuart_id = 1 + len(user_vm_list)
+
         vmname = common.get_node("./name/text()", vm_node)
+
+        # Allocate irq for S5 vuart
+        if load_order != "SERVICE_VM":
+            legacy_vuart_irq = alloc_irq(irq_list)
+            create_vuart_irq_node(allocation_etree, common.get_node("./@id", vm_node), load_order, str(vuart_id), legacy_vuart_irq)
+            vuart_id = vuart_id + 1
+
         vuart_connections = scenario_etree.xpath("//vuart_connection")
         for connection in vuart_connections:
             endpoint_list = connection.xpath(".//endpoint")
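The vuart_id numbering this sets up, traced for a hypothetical scenario with two User VMs:

    user_vm_list = ["POST_STD_VM1", "POST_STD_VM2"]  # hypothetical names

    # Service VM: vuart_id 1..len(user_vm_list) are the S5 vuarts, created
    # up front with irq fixed at "0"; its connection vuarts start after them.
    service_vm_first_connection_id = 1 + len(user_vm_list)  # -> 3

    # Each User VM: vuart_id 1 is its S5 vuart, with an irq drawn from
    # range(1, 15); its connection vuarts continue from 2.
    user_vm_first_connection_id = 1 + 1  # -> 2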
@@ -67,8 +86,8 @@ def alloc_vuart_connection_irqs(board_etree, scenario_etree, allocation_etree):
                 vm_name = common.get_node("./vm_name/text()",endpoint)
                 if vm_name == vmname:
                     legacy_vuart_irq = alloc_irq(irq_list)
-                    create_vuart_irq_node(allocation_etree, common.get_node("./@id", vm_node), load_order, vuart_id, legacy_vuart_irq)
-                    vuart_id = str(int(vuart_id) + 1)
+                    create_vuart_irq_node(allocation_etree, common.get_node("./@id", vm_node), load_order, str(vuart_id), legacy_vuart_irq)
+                    vuart_id = vuart_id + 1
 
 def get_irqs_of_device(device_node):
     irqs = set()
misc/config_tools/static_allocators/s5_vuart.py (90 lines, new file)
@@ -0,0 +1,90 @@
#!/usr/bin/env python3
#
# Copyright (C) 2022 Intel Corporation.
#
# SPDX-License-Identifier: BSD-3-Clause
#

import sys
import common, lib.error

standard_uart_port = ['0x3F8', '0x2F8', '0x3E8', '0x2E8']

# COM1 is used for the console vUART, so we allocate io_port from COM2~COM4
service_port_list = list(range(0x9000, 0x9100, 8))

def create_s5_vuart_connection(allocation_etree, service_vm_name, service_vm_port, user_vm_name, user_vm_port):
    vuart_connections_node = common.get_node(f"/acrn-config/hv/vuart_connections", allocation_etree)
    if vuart_connections_node is None:
        vuart_connections_node = common.append_node("/acrn-config/hv/vuart_connections", None, allocation_etree)

    connection_name = service_vm_name + "_" + user_vm_name

    vuart_connection_node = common.append_node(f"./vuart_connection", None, vuart_connections_node)
    common.append_node(f"./name", connection_name, vuart_connection_node)
    common.append_node(f"./type", "type", vuart_connection_node)

    service_vm_endpoint = common.append_node(f"./endpoint", None, vuart_connection_node)
    common.append_node(f"./vm_name", service_vm_name, service_vm_endpoint)
    common.append_node(f"./io_port", service_vm_port, service_vm_endpoint)

    user_vm_endpoint = common.append_node(f"./endpoint", None, vuart_connection_node)
    common.append_node(f"./vm_name", user_vm_name, user_vm_endpoint)
    common.append_node(f"./io_port", user_vm_port, user_vm_endpoint)

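For orientation, the element tree this helper appends to the allocation XML looks roughly as follows; the VM names and ports are illustrative (note the service-side port comes back from alloc_free_port as an upper-cased hex string):

    <vuart_connections>
      <vuart_connection>
        <name>ACRN_Service_VM_POST_STD_VM1</name>
        <type>type</type>
        <endpoint>
          <vm_name>ACRN_Service_VM</vm_name>
          <io_port>0X9000</io_port>
        </endpoint>
        <endpoint>
          <vm_name>POST_STD_VM1</vm_name>
          <io_port>0x2F8</io_port>
        </endpoint>
      </vuart_connection>
    </vuart_connections>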
def get_console_vuart_port(scenario_etree, vm_name):
    port = common.get_node(f"//vm[name = '{vm_name}']/console_vuart/text()", scenario_etree)

    if port == "COM Port 1":
        port = "0x3F8U"
    elif port == "COM Port 2":
        port = "0x2F8U"
    elif port == "COM Port 3":
        port = "0x3E8U"
    elif port == "COM Port 4":
        port = "0x2E8U"

    return port

def alloc_free_port(scenario_etree, load_order, vm_name):
    port_list = scenario_etree.xpath(f"//endpoint[vm_name = '{vm_name}']/io_port/text()")
    console_port = get_console_vuart_port(scenario_etree, vm_name)
    if console_port is not None:
        port_list.append(console_port.replace("U", ""))

    if load_order == "SERVICE_VM":
        tmp_list = []
        for port in port_list:
            tmp_list.append(int(port, 16))

        port_list = list(set(service_port_list) - set(tmp_list))
        port = hex(service_port_list[0])
        service_port_list.remove(port_list[0])
        return str(port).upper()
    else:
        tmp_list = list(set(standard_uart_port) - set(port_list))
        try:
            port = tmp_list[0]
            return port
        except IndexError as e:
            raise lib.error.ResourceError("Cannot allocate legacy io port: {}, {}".format(e, port_list)) from e

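A worked example of the user-VM branch, under assumed inputs: if the VM's console vUART sits on COM1 and one existing endpoint already occupies 0x2F8, the allocation reduces to a set difference over the standard ports:

    standard_uart_port = ['0x3F8', '0x2F8', '0x3E8', '0x2E8']

    # Ports this hypothetical VM already uses: console on COM1 ('0x3F8',
    # with the trailing "U" stripped) plus an existing endpoint on '0x2F8'.
    used = ['0x3F8', '0x2F8']

    # Remaining candidates; note set difference is unordered, so either
    # free port may be picked first.
    free = list(set(standard_uart_port) - set(used))
    print(free)  # e.g. ['0x3E8', '0x2E8']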
def alloc_vuart_connection_info(board_etree, scenario_etree, allocation_etree):
    user_vm_list = scenario_etree.xpath(f"//vm[load_order != 'SERVICE_VM']")
    service_vm_id = common.get_node(f"//vm[load_order = 'SERVICE_VM']/@id", scenario_etree)
    service_vm_name = common.get_node(f"//vm[load_order = 'SERVICE_VM']/name/text()", scenario_etree)

    if (service_vm_id is None) or (service_vm_name is None):
        return

    for index,vm_node in enumerate(user_vm_list):
        vm_id = common.get_node("./@id", vm_node)
        load_order = common.get_node("./load_order/text()", vm_node)
        user_vm_name = common.get_node(f"./name/text()", vm_node)
        service_vm_port = alloc_free_port(scenario_etree, "SERVICE_VM", user_vm_name)
        user_vm_port = alloc_free_port(scenario_etree, load_order, user_vm_name)

        create_s5_vuart_connection(allocation_etree, service_vm_name, service_vm_port, user_vm_name, user_vm_port)

def fn(board_etree, scenario_etree, allocation_etree):
    alloc_vuart_connection_info(board_etree, scenario_etree, allocation_etree)
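Like the other scripts under misc/config_tools/static_allocators/, the module exposes fn(board_etree, scenario_etree, allocation_etree) as its entry point. A sketch of how such allocators get driven, assuming the three XML trees are already parsed (the real pipeline discovers the modules dynamically; the module list here is illustrative):

    import importlib
    from lxml import etree

    board_etree = etree.parse("board.xml")
    scenario_etree = etree.parse("scenario.xml")
    allocation_etree = etree.parse("allocation.xml")

    # Run each static allocator's fn() in turn against the shared trees.
    for mod_name in ("s5_vuart",):
        mod = importlib.import_module(mod_name)
        mod.fn(board_etree, scenario_etree, allocation_etree)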