Mirror of https://github.com/projectacrn/acrn-hypervisor.git
config-tools: solve hv and vm memory address conflict
Fixed the problem that ACRN could still build normally when the memory addresses of the HV and a VM conflict, which caused the hypervisor to hang. Also defined a class to handle memory: obtaining and checking the available memory ranges are now implemented as class methods.

Tracked-On: #7913
Reviewed-by: Junjie Mao <junjie.mao@intel.com>
Signed-off-by: Ziheng Li <ziheng.li@intel.com>
Parent: 5bc94cf08a
Commit: 22184ac99a
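The heart of the fix is that the HV's reserved region is now carved out of every board RAM range before VM memory is allocated, so a conflicting layout is rejected or split at configuration time instead of hanging the hypervisor at runtime. A minimal standalone sketch of that carve-out step, with hypothetical addresses (not the config-tool code itself):

```python
# Hypothetical, self-contained illustration of the carve-out idea (not the config-tool code).
def carve_out(ram_start, ram_size, hv_start, hv_size):
    """Return the parts of [ram_start, ram_start + ram_size) that the HV region does not cover."""
    ranges = {}
    if ram_start < hv_start and ram_start + ram_size > hv_start + hv_size:
        # The HV region sits strictly inside this RAM range: keep the pieces below and above it.
        ranges[ram_start] = hv_start - ram_start
        ranges[hv_start + hv_size] = (ram_start + ram_size) - (hv_start + hv_size)
    else:
        # Any other layout is kept as-is here (mirrors the commit's else branch).
        ranges[ram_start] = ram_size
    return ranges

# Example: a 4 GiB RAM range at 1 MiB, with 256 MiB reserved for the HV at 0x40000000.
print({hex(k): hex(v) for k, v in carve_out(0x100000, 0x100000000, 0x40000000, 0x10000000).items()})
# {'0x100000': '0x3ff00000', '0x50000000': '0xb0100000'}
```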
@@ -11,46 +11,57 @@ import lib.error
 sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'library'))
 import common, math, logging
 
-def import_memory_info(board_etree):
-    ram_range = {}
-    for memory_range in board_etree.xpath("/acrn-config/memory/range[not(@id) or @id = 'RAM']"):
-        start = int(memory_range.get("start"), base=16)
-        size = int(memory_range.get("size"), base=10)
-        ram_range[start] = size
-
-    return ram_range
-
-def check_hpa(vm_node_info):
-    hpa_node_list = vm_node_info.xpath("./memory/hpa_region/*")
-    hpa_node_list_new = []
-    for hpa_node in hpa_node_list:
-        if int(hpa_node.text, 16) != 0:
-            hpa_node_list_new.append(hpa_node)
-
-    return hpa_node_list_new
-
-def get_memory_info(vm_node_info):
-    start_hpa = []
-    size_hpa = []
-    hpa_info = {}
-
-    size_node = common.get_node("./memory/size", vm_node_info)
-    if size_node is not None:
-        size_byte = int(size_node.text) * 0x100000
-        hpa_info[0] = size_byte
-    hpa_node_list = check_hpa(vm_node_info)
-    if len(hpa_node_list) != 0:
-        for hpa_node in hpa_node_list:
-            if hpa_node.tag == "start_hpa":
-                start_hpa.append(int(hpa_node.text, 16))
-            elif hpa_node.tag == "size_hpa":
-                size_byte = int(hpa_node.text) * 0x100000
-                size_hpa.append(size_byte)
-        if len(start_hpa) != 0 and len(start_hpa) == len(start_hpa):
-            for i in range(len(start_hpa)):
-                hpa_info[start_hpa[i]] = size_hpa[i]
-
-    return hpa_info
+class RamRange():
+    ram_range = {}
+
+    @classmethod
+    def import_memory_info(cls, board_etree, allocation_etree):
+        hv_start = int(common.get_node("/acrn-config/hv/MEMORY/HV_RAM_START/text()", allocation_etree), 16)
+        hv_size = int(common.get_node("/acrn-config/hv/MEMORY/HV_RAM_SIZE/text()", allocation_etree), 16)
+        for memory_range in board_etree.xpath("/acrn-config/memory/range[not(@id) or @id = 'RAM']"):
+            start = int(memory_range.get("start"), base=16)
+            size = int(memory_range.get("size"), base=10)
+            if start < hv_start and start + size > hv_start + hv_size:
+                cls.ram_range[start] = hv_start - start
+                cls.ram_range[hv_start + hv_size] = start + size - hv_start - hv_size
+            else:
+                cls.ram_range[start] = size
+
+        return cls.ram_range
+
+    @classmethod
+    def check_hpa(cls, vm_node_info):
+        hpa_node_list = vm_node_info.xpath("./memory/hpa_region/*")
+        hpa_node_list_new = []
+        for hpa_node in hpa_node_list:
+            if int(hpa_node.text, base=16) != 0:
+                hpa_node_list_new.append(hpa_node)
+
+        return hpa_node_list_new
+
+    @classmethod
+    def get_memory_info(cls, vm_node_info):
+        start_hpa = []
+        size_hpa = []
+        hpa_info = {}
+
+        size_node = common.get_node("./memory/size", vm_node_info)
+        if size_node is not None:
+            size_byte = int(size_node.text) * 0x100000
+            hpa_info[0] = size_byte
+        hpa_node_list = RamRange().check_hpa(vm_node_info)
+        if len(hpa_node_list) != 0:
+            for hpa_node in hpa_node_list:
+                if hpa_node.tag == "start_hpa":
+                    start_hpa.append(int(hpa_node.text, 16))
+                elif hpa_node.tag == "size_hpa":
+                    size_byte = int(hpa_node.text) * 0x100000
+                    size_hpa.append(size_byte)
+            if len(start_hpa) != 0 and len(start_hpa) == len(start_hpa):
+                for i in range(len(start_hpa)):
+                    hpa_info[start_hpa[i]] = size_hpa[i]
+
+        return hpa_info
 
 def alloc_memory(scenario_etree, ram_range_info):
     vm_node_list = scenario_etree.xpath("/acrn-config/vm[load_order = 'PRE_LAUNCHED_VM']")
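For reference, the range parsing that feeds RamRange.ram_range boils down to reading a hexadecimal start attribute and a decimal size attribute from the board XML. A self-contained sketch of just that parsing, using a made-up board snippet (the XPath and int() conversions mirror the hunk above):

```python
# Standalone sketch; the XML snippet and its values are hypothetical.
from lxml import etree

board_xml = """
<acrn-config>
  <memory>
    <range start="0x100000" size="2146435072" id="RAM"/>
    <range start="0x100000000" size="4294967296"/>
  </memory>
</acrn-config>
"""
board_etree = etree.fromstring(board_xml)

ram_range = {}
for memory_range in board_etree.xpath("/acrn-config/memory/range[not(@id) or @id = 'RAM']"):
    start = int(memory_range.get("start"), base=16)   # start attribute is hexadecimal text
    size = int(memory_range.get("size"), base=10)     # size attribute is decimal bytes
    ram_range[start] = size

print({hex(k): v for k, v in ram_range.items()})
# {'0x100000': 2146435072, '0x100000000': 4294967296}
```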
@@ -62,7 +73,7 @@ def alloc_memory(scenario_etree, ram_range_info):
             ram_range_info.pop(key)
 
     for vm_node in vm_node_list:
-        mem_info = get_memory_info(vm_node)
+        mem_info = RamRange().get_memory_info(vm_node)
         mem_info_list.append(mem_info)
         vm_node_index_list.append(vm_node.attrib["id"])
 
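The mem_info dictionary collected here maps a requested start HPA to its size in bytes, with key 0 standing for a size-only request that may land anywhere. A sketch of that shape for a hypothetical pre-launched VM node, mirroring the tag handling in get_memory_info (sizes are in MB, as in the scenario schema):

```python
# Standalone sketch; the VM node below is hypothetical.
from lxml import etree

vm_xml = """
<vm id="0">
  <memory>
    <hpa_region>
      <start_hpa>0x100000000</start_hpa>
      <size_hpa>1024</size_hpa>
    </hpa_region>
  </memory>
</vm>
"""
vm_node = etree.fromstring(vm_xml)

start_hpa, size_hpa, hpa_info = [], [], {}
for hpa_node in vm_node.xpath("./memory/hpa_region/*"):
    if hpa_node.tag == "start_hpa":
        start_hpa.append(int(hpa_node.text, 16))          # start address, hexadecimal text
    elif hpa_node.tag == "size_hpa":
        size_hpa.append(int(hpa_node.text) * 0x100000)    # MB -> bytes
for start, size in zip(start_hpa, size_hpa):
    hpa_info[start] = size

print(hpa_info)
# {4294967296: 1073741824} -> 1 GiB requested at the 4 GiB boundary
```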
@@ -77,20 +88,22 @@ def alloc_hpa_region(ram_range_info, mem_info_list, vm_node_index_list):
         mem_key = sorted(ram_range_info)
         for mem_start in mem_key:
             mem_size = ram_range_info[mem_start]
+            mem_end = mem_start + mem_size
             for hpa_start in hpa_key:
                 hpa_size = mem_info_list[vm_index][hpa_start]
+                hpa_end = hpa_start + hpa_size
                 if hpa_start != 0:
-                    if mem_start < hpa_start and mem_start + mem_size > hpa_start + hpa_size:
+                    if mem_start < hpa_start and mem_end > hpa_end:
                         ram_range_info[mem_start] = hpa_start - mem_start
-                        ram_range_info[hpa_start + hpa_size] = mem_start + mem_size - hpa_start - hpa_size
-                    elif mem_start == hpa_start and mem_start + mem_size > hpa_start + hpa_size:
+                        ram_range_info[hpa_end] = mem_end - hpa_end
+                    elif mem_start == hpa_start and mem_end > hpa_end:
                         del ram_range_info[mem_start]
-                        ram_range_info[hpa_start + hpa_size] = mem_start + mem_size - hpa_start - hpa_size
-                    elif mem_start < hpa_start and mem_start + mem_size == hpa_start + hpa_size:
+                        ram_range_info[hpa_end] = mem_end - hpa_end
+                    elif mem_start < hpa_start and mem_end == hpa_end:
                         ram_range_info[mem_start] = hpa_start - mem_start
-                    elif mem_start == hpa_start and mem_start + mem_size == hpa_start + hpa_size:
+                    elif mem_start == hpa_start and mem_end == hpa_end:
                         del ram_range_info[mem_start]
-                    elif mem_start > hpa_start or mem_start + mem_size < hpa_start + hpa_size:
+                    elif mem_start > hpa_start or mem_end < hpa_end:
                         raise lib.error.ResourceError(f"Start address of HPA is out of available memory range: vm id: {vm_index}, hpa_start: {hpa_start}.")
                     elif mem_size < hpa_size:
                         raise lib.error.ResourceError(f"Size of HPA is out of available memory range: vm id: {vm_index}, hpa_size: {hpa_size}.")
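The carving in alloc_hpa_region removes each explicitly requested HPA region from the free-RAM dictionary, splitting a range when the request sits strictly inside it and raising ResourceError when it does not fit. A worked example of the strictly-inside case, with hypothetical values:

```python
# Standalone worked example; mirrors the first branch of the carving above.
ram_range_info = {0x100000000: 0x100000000}      # one free range: 4 GiB of RAM starting at 4 GiB
hpa_start, hpa_size = 0x140000000, 0x40000000    # a VM requests 1 GiB at the 5 GiB mark

mem_start = 0x100000000
mem_size = ram_range_info[mem_start]
mem_end = mem_start + mem_size
hpa_end = hpa_start + hpa_size

if mem_start < hpa_start and mem_end > hpa_end:
    # Request sits strictly inside the free range: split it into the pieces below and above it.
    ram_range_info[mem_start] = hpa_start - mem_start
    ram_range_info[hpa_end] = mem_end - hpa_end

print({hex(k): hex(v) for k, v in ram_range_info.items()})
# {'0x100000000': '0x40000000', '0x180000000': '0x80000000'}
```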
@@ -147,14 +160,14 @@ def write_hpa_info(allocation_etree, mem_info_list, vm_node_index_list):
             region_index = region_index + 1
 
 def alloc_vm_memory(board_etree, scenario_etree, allocation_etree):
-    ram_range_info = import_memory_info(board_etree)
+    ram_range_info = RamRange().import_memory_info(board_etree, allocation_etree)
     ram_range_info, mem_info_list, vm_node_index_list = alloc_memory(scenario_etree, ram_range_info)
     write_hpa_info(allocation_etree, mem_info_list, vm_node_index_list)
 
 def allocate_hugepages(board_etree, scenario_etree, allocation_etree):
     hugepages_1gb = 0
     hugepages_2mb = 0
-    ram_range_info = import_memory_info(board_etree)
+    ram_range_info = RamRange().ram_range
     total_hugepages = int(sum(ram_range_info[i] for i in ram_range_info if i >= 0x100000000)*0.98/(1024*1024*1024) \
                           - sum(int(i) for i in scenario_etree.xpath("//vm[load_order = 'PRE_LAUNCHED_VM']/memory/hpa_region/size_hpa/text()"))/1024 \
                           - 5 - 300/1024 * len(scenario_etree.xpath("//virtio_devices/gpu")))
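The total_hugepages estimate can be read as: 98% of the RAM above 4 GiB (in GiB), minus the pre-launched VMs' explicit hpa_region sizes (MB converted to GiB), minus a fixed 5 GiB reserve, minus roughly 300 MB per virtio GPU device. A worked example with hypothetical board and scenario values:

```python
# Hypothetical board/scenario values; the constants (0.98, 5, 300) come from the formula above.
GiB = 1024 * 1024 * 1024

ram_above_4g = 28 * GiB                  # RAM mapped at or above 0x100000000
prelaunched_size_hpa_mb = [1024, 2048]   # size_hpa entries (MB) of pre-launched VMs
virtio_gpu_count = 1                     # number of //virtio_devices/gpu nodes

total_hugepages = int(ram_above_4g * 0.98 / GiB              # usable RAM above 4 GiB, in GiB
                      - sum(prelaunched_size_hpa_mb) / 1024  # explicit HPA regions, MB -> GiB
                      - 5                                    # fixed reserve
                      - 300 / 1024 * virtio_gpu_count)       # ~300 MB per virtio GPU
print(total_hugepages)
# 19
```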