acrn-config: support generation of per vcpu clos configuration
Added "vcpu_clos" to configuration XML, here is an example of VM2 with 2 vCPUs: <clos desc="Class of Service for Cache Allocation Technology. Please refer SDM 17.19.2 for details and use with caution."> <vcpu_clos>1</vcpu_clos> <vcpu_clos>0</vcpu_clos> </clos> A macro will be generated in vm_configuration.h: #define VM2_VCPU_CLOS {1U, 0U} And the macro will be used in vm_configuration.c: struct acrn_vm_config vm_configs[CONFIG_MAX_VM_NUM] = { ... { ... .clos = VM2_VCPU_CLOS, ... } } Tracked-On: #4566 Signed-off-by: Yan, Like <like.yan@intel.com> Reviewed-by: Victor Sun <victor.sun@intel.com>
This commit is contained in:
parent 7694386663
commit 02fea0f228
@@ -23,7 +23,7 @@ GUEST_FLAG = ["0UL", "GUEST_FLAG_SECURE_WORLD_ENABLED", "GUEST_FLAG_LAPIC_PASSTH
 START_HPA_SIZE_LIST = ['0x20000000', '0x40000000', '0x80000000', 'CONFIG_SOS_RAM_SIZE']
 
-MULTI_ITEM = ["guest_flag", "pcpu_id", "input", "block", "network"]
+MULTI_ITEM = ["guest_flag", "pcpu_id", "vcpu_clos", "input", "block", "network"]
 
 SIZE_K = 1024
 SIZE_M = SIZE_K * 1024
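The MULTI_ITEM list names tags that may occur several times under one <vm> node; "vcpu_clos" belongs there because each vCPU contributes its own <vcpu_clos> leaf, and every occurrence must be appended in order rather than overwriting a single value. A minimal standalone sketch of that collection behavior (illustrative Python, not the tool's own code; the XML literal is the commit-message example):

    import xml.etree.ElementTree as ET

    vm = ET.fromstring(
        "<vm><clos><vcpu_clos>1</vcpu_clos><vcpu_clos>0</vcpu_clos></clos></vm>")
    # Collect every <vcpu_clos> leaf, preserving vCPU order.
    vcpu_clos = [leaf.text for leaf in vm.iter("vcpu_clos")]
    print(vcpu_clos)  # ['1', '0'] -> later rendered as "{1U, 0U}"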
@@ -36,6 +36,7 @@ class MultiItem():
     def __init__(self):
         self.guest_flag = []
         self.pcpu_id = []
+        self.vcpu_clos = []
         self.vir_input = []
         self.vir_block = []
         self.vir_console = []
@@ -378,6 +379,11 @@ def get_leaf_tag_val(config_file, branch_tag, tag_str=''):
                 tmp_cpus.append(leaf.text)
                 continue
 
+            # get vcpu_clos for vm
+            if leaf.tag == "vcpu_clos" and tag_str == "vcpu_clos":
+                tmp_cpus.append(leaf.text)
+                continue
+
             # append guest flags for each vm
             if tmp_flag and tag_str == "guest_flag":
                 tmp_tag.append(tmp_flag)
@@ -402,6 +408,10 @@ def get_leaf_value(tmp, tag_str, leaf):
     if leaf.tag == "pcpu_id" and tag_str == "pcpu_id":
         tmp.multi.pcpu_id.append(leaf.text)
 
+    # get vcpu_clos for vm
+    if leaf.tag == "vcpu_clos" and tag_str == "vcpu_clos":
+        tmp.multi.vcpu_clos.append(leaf.text)
+
     # get virtio-input for vm
     if leaf.tag == "input" and tag_str == "input":
         tmp.multi.vir_input.append(leaf.text)
@@ -426,6 +436,10 @@ def get_sub_value(tmp, tag_str, vm_id):
     if tmp.multi.pcpu_id and tag_str == "pcpu_id":
         tmp.tag[vm_id] = tmp.multi.pcpu_id
 
+    # append clos ids for vm
+    if tmp.multi.vcpu_clos and tag_str == "vcpu_clos":
+        tmp.tag[vm_id] = tmp.multi.vcpu_clos
+
     # append virtio input for vm
     if tmp.multi.vir_input and tag_str == "input":
         tmp.tag[vm_id] = tmp.multi.vir_input
@@ -665,6 +665,29 @@ def cpus_assignment(cpus_per_vm, index):
     vm_cpu_bmp['cpu_num'] = len(cpus_per_vm[index])
     return vm_cpu_bmp
 
 
+def clos_assignment(clos_per_vm, index):
+    """
+    Get clos id assignment for vm by vm index
+    :param clos_per_vm: a dictionary mapping vm id to its per-vCPU clos ids
+    :param index: vm index
+    :return: clos assignment string
+    """
+    vm_clos_bmp = {}
+
+    for i in range(len(clos_per_vm[index])):
+        if i == 0:
+            if len(clos_per_vm[index]) == 1:
+                clos_str = "{{{0}U}}".format(clos_per_vm[index][0])
+            else:
+                clos_str = "{{{0}U".format(clos_per_vm[index][0])
+        else:
+            if i == len(clos_per_vm[index]) - 1:
+                clos_str = clos_str + ", {0}U}}".format(clos_per_vm[index][i])
+            else:
+                clos_str = clos_str + ", {0}U".format(clos_per_vm[index][i])
+
+    vm_clos_bmp['clos_map'] = clos_str
+    return vm_clos_bmp
+
 
 def get_vuart_info_id(config_file, idx):
     """
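For the commit-message example, clos_assignment({2: ['1', '0']}, 2) returns {'clos_map': '{1U, 0U}'}. The same initializer string can also be built in one pass with str.join; a sketch of an equivalent (illustrative only, not the committed code; unlike the loop above it also tolerates an empty clos list):

    def clos_assignment(clos_per_vm, index):
        # Render each clos id as an unsigned C literal and brace the list.
        clos_str = "{" + ", ".join("{0}U".format(c) for c in clos_per_vm[index]) + "}"
        return {'clos_map': clos_str}

    print(clos_assignment({2: ['1', '0']}, 2))  # {'clos_map': '{1U, 0U}'}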
@@ -278,7 +278,7 @@ class VmInfo:
     name = {}
     load_order = {}
     uuid = {}
-    clos_set = {}
+    clos_per_vm = {}
     guest_flag_idx = {}
     cpus_per_vm = {}
     severity = {}
@@ -307,7 +307,9 @@ class VmInfo:
             self.scenario_info, "guest_flags", "guest_flag")
         self.cpus_per_vm = scenario_cfg_lib.get_leaf_tag_map(
             self.scenario_info, "vcpu_affinity", "pcpu_id")
-        self.clos_set = scenario_cfg_lib.get_leaf_tag_map(self.scenario_info, "clos")
+        self.clos_per_vm = scenario_cfg_lib.get_leaf_tag_map(
+            self.scenario_info, "clos", "vcpu_clos")
+
         self.severity = scenario_cfg_lib.get_leaf_tag_map(self.scenario_info, "severity")
         self.epc_section.get_info()
         self.mem_info.get_info()
@@ -323,6 +325,13 @@ class VmInfo:
         """
         return scenario_cfg_lib.cpus_assignment(self.cpus_per_vm, index)
 
+    def get_clos_bitmap(self, index):
+        """
+        :param index: vm index
+        :return: clos assignment string for the vm
+        """
+        return scenario_cfg_lib.clos_assignment(self.clos_per_vm, index)
+
     def check_item(self):
         """
         Check all items in this class
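Taken together, the data flow for the commit-message example looks like this (values illustrative; the dictionary is the shape get_leaf_tag_map(..., "clos", "vcpu_clos") is expected to return, and get_clos_bitmap() simply delegates to clos_assignment()):

    clos_per_vm = {2: ['1', '0']}   # vm id -> ordered per-vCPU clos ids
    # vm_info.get_clos_bitmap(2) then resolves to
    # scenario_cfg_lib.clos_assignment(clos_per_vm, 2)
    # == {'clos_map': '{1U, 0U}'}, feeding the VM2_VCPU_CLOS macro below.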
@@ -201,9 +201,8 @@ def clos_output(vm_info, i, config):
         common_clos_max = min(rdt_res_clos_max)
     else:
         common_clos_max = 0
-    if len(rdt_res) != 0 and common_clos_max !=0 and i in vm_info.clos_set:
-        print("\t\t.clos = {0}U,".format(vm_info.clos_set[i]), file=config)
-
+    if len(rdt_res) != 0 and common_clos_max != 0 and i in vm_info.clos_per_vm:
+        print("\t\t.clos = VM{}_VCPU_CLOS,".format(i), file=config)
 
 def get_guest_flag(flag_index):
     """
@@ -274,7 +273,6 @@ def gen_sdc_source(vm_info, config):
               "there is supposed to be the highest severity guest */", file=config)
         print("\t\t.guest_flags = {0},".format(sos_guest_flags), file=config)
     vcpu_affinity_output(vm_info, 0, config)
-    clos_output(vm_info, 0, config)
     print("\t\t.severity = {0},".format(vm_info.severity[0].strip()), file=config)
     print("\t\t.memory = {", file=config)
     print("\t\t\t.start_hpa = {}UL,".format(vm_info.mem_info.mem_start_hpa[0]), file=config)
@@ -555,9 +553,9 @@ def gen_industry_source(vm_info, config):
 
         vcpu_affinity_output(vm_info, i, config)
         print("\t\t.severity = {0},".format(vm_info.severity[i].strip()), file=config)
+        clos_output(vm_info, i, config)
 
         if i == 0:
-            clos_output(vm_info, i, config)
             print("\t\t.memory = {", file=config)
             print("\t\t\t.start_hpa = 0UL,", file=config)
             print("\t\t\t.size = CONFIG_SOS_RAM_SIZE,", file=config)
@@ -4,6 +4,7 @@
 #
 
 import scenario_cfg_lib
+import board_cfg_lib
 
 VM_HEADER_DEFINE = scenario_cfg_lib.HEADER_LICENSE + r"""
 #ifndef VM_CONFIGURATIONS_H
@@ -35,6 +36,24 @@ def cpu_affinity_output(vm_info, i, config):
     print("#define VM{0}_CONFIG_VCPU_AFFINITY\t\t{1}".format(
         i, cpu_bits['cpu_map']), file=config)
 
+
+def clos_config_output(vm_info, i, config):
+    """
+    Output the per-vCPU clos macro
+    :param vm_info: the data structure that holds all the xml item values
+    :param i: the index of vm id
+    :param config: file pointer to store the information
+    """
+    (rdt_res, rdt_res_clos_max, _) = board_cfg_lib.clos_info_parser(scenario_cfg_lib.BOARD_INFO_FILE)
+    if len(rdt_res_clos_max) != 0:
+        common_clos_max = min(rdt_res_clos_max)
+    else:
+        common_clos_max = 0
+
+    if common_clos_max == 0:
+        return
+
+    clos_config = vm_info.get_clos_bitmap(i)
+    print("#define VM{0}_VCPU_CLOS\t\t{1}".format(i, clos_config['clos_map']), file=config)
+
 def scenario_vm_num(load_type_cnt, config):
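Both clos_output() above and this clos_config_output() gate their output on the same board capability check: the usable clos range is capped by the most constrained RDT resource the board reports, and when no resource reports any clos the macro (and the .clos field) is simply not emitted. A small sketch of that rule (board data made up for illustration):

    rdt_res_clos_max = [16, 8]   # e.g. L3 CAT exposes 16 clos ids, MBA only 8
    common_clos_max = min(rdt_res_clos_max) if rdt_res_clos_max else 0
    print(common_clos_max)       # 8 -> VM{N}_VCPU_CLOS macros are generated;
                                 # 0 -> clos_config_output() returns early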
@@ -70,16 +89,19 @@ def gen_sdc_header(vm_info, config):
         print("#if CONFIG_MAX_KATA_VM_NUM > 0", file=config)
         # Set VM1 vcpu
         cpu_affinity_output(vm_info, 1, config)
+        clos_config_output(vm_info, 1, config)
         # KATA VM
         cpu_affinity_output(vm_info, 2, config)
+        clos_config_output(vm_info, 2, config)
         #else:
         print("#else", file=config)
         # Only two VMs in SDC config, setup vcpu affinity for VM1
         cpu_affinity_output(vm_info, 1, config)
+        clos_config_output(vm_info, 1, config)
         print("#endif", file=config)
     else:
         cpu_affinity_output(vm_info, 1, config)
-
+        clos_config_output(vm_info, 1, config)
     print("", file=config)
     print("{0}".format(VM_END_DEFINE), file=config)
@@ -108,6 +130,7 @@ def gen_sdc2_header(vm_info, config):
     print("", file=config)
     for i in range(scenario_cfg_lib.VM_COUNT):
         cpu_affinity_output(vm_info, i, config)
+        clos_config_output(vm_info, i, config)
     print("", file=config)
     print("{0}".format(VM_END_DEFINE), file=config)
@@ -157,6 +180,7 @@ def gen_logical_partition_header(vm_info, config):
 
         cpu_bits = vm_info.get_cpu_bitmap(i)
         cpu_affinity_output(vm_info, i, config)
+        clos_config_output(vm_info, i, config)
         print("#define VM{0}_CONFIG_MEM_START_HPA\t\t{1}UL".format(
             i, vm_info.mem_info.mem_start_hpa[i]), file=config)
         print("#define VM{0}_CONFIG_MEM_SIZE\t\t\t{1}UL".format(
@@ -225,6 +249,7 @@ def gen_industry_header(vm_info, config):
     print("", file=config)
     for i in range(scenario_cfg_lib.VM_COUNT):
         cpu_affinity_output(vm_info, i, config)
+        clos_config_output(vm_info, i, config)
     print("", file=config)
     print("{0}".format(VM_END_DEFINE), file=config)
@@ -249,6 +274,7 @@ def gen_hybrid_header(vm_info, config):
     print("", file=config)
     for i in range(scenario_cfg_lib.VM_COUNT):
         cpu_affinity_output(vm_info, i, config)
+        clos_config_output(vm_info, i, config)
 
     print("#define VM0_CONFIG_MEM_START_HPA\t{0}UL".format(
         vm_info.mem_info.mem_start_hpa[0]), file=config)