diff --git a/misc/config_tools/board_config/board_c.py b/misc/config_tools/board_config/board_c.py
index 971b31785..8082c4387 100644
--- a/misc/config_tools/board_config/board_c.py
+++ b/misc/config_tools/board_config/board_c.py
@@ -7,6 +7,8 @@ import sys
 import enum
 import board_cfg_lib
 import common
+import lxml.etree
+import os
 
 class RDT(enum.Enum):
     L2 = 0
@@ -17,6 +19,7 @@ INCLUDE_HEADER = """
 #include
 #include
 #include
+#include
 #include
 #include
 """
@@ -102,9 +105,7 @@ def populate_clos_mask_msr(rdt_res, cat_mask_list, config):
     idx = 0
     for cat_mask in cat_mask_list:
         print("\t{", file=config)
-        print("\t\t.value.clos_mask = CLOS_MASK_{},".format(idx), file=config)
-        print("\t\t.msr_index = MSR_IA32_{0}_MASK_BASE + {1},".format(
-            rdt_res, idx), file=config)
+        print("\t\t.clos_mask = {},".format(cat_mask), file=config)
         print("\t},", file=config)
         idx += 1
 
@@ -118,58 +119,148 @@ def populate_mba_delay_mask(rdt_res, mba_delay_list, config):
     idx = 0
     for mba_delay_mask in mba_delay_list:
         print("\t{", file=config)
-        print("\t\t.value.mba_delay = MBA_MASK_{},".format(idx), file=config)
-        print("\t\t.msr_index = MSR_IA32_{0}_MASK_BASE + {1},".format(
-            rdt_res, idx), file=config)
+        print("\t\t.mba_delay = {},".format(mba_delay_mask), file=config)
         print("\t},", file=config)
         idx += 1
 
-
-def gen_rdt_res(config):
-    """
-    Get RDT resource (L2, L3, MBA) information
-    :param config: it is a file pointer of board information for writing to
-    """
-    err_dic = {}
-    rdt_res_str =""
-    res_present = [0, 0, 0]
-    (rdt_resources, rdt_res_clos_max, _) = board_cfg_lib.clos_info_parser(common.BOARD_INFO_FILE)
-    common_clos_max = board_cfg_lib.get_common_clos_max()
-
-    cat_mask_list = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "RDT", "CLOS_MASK")
-    mba_delay_list = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "RDT", "MBA_DELAY")
-
-    if common_clos_max > MSR_IA32_L2_MASK_END - MSR_IA32_L2_MASK_BASE or\
-        common_clos_max > MSR_IA32_L3_MASK_END - MSR_IA32_L3_MASK_BASE:
-        err_dic["board config: generate board.c failed"] = "CLOS MAX should be less than reserved adress region length of L2/L3 cache"
-        return err_dic
-
-    print("\n#ifdef CONFIG_RDT_ENABLED", file=config)
-    if len(rdt_resources) == 0 or common_clos_max == 0:
-        print("struct platform_clos_info platform_{0}_clos_array[MAX_CACHE_CLOS_NUM_ENTRIES];".format("l2"), file=config)
-        print("struct platform_clos_info platform_{0}_clos_array[MAX_CACHE_CLOS_NUM_ENTRIES];".format("l3"), file=config)
-        print("struct platform_clos_info platform_{0}_clos_array[MAX_MBA_CLOS_NUM_ENTRIES];".format("mba"), file=config)
+def get_rdt_enabled():
+    scenario_etree = lxml.etree.parse(common.SCENARIO_INFO_FILE)
+    enable = scenario_etree.xpath(f"//RDT_ENABLED/text()")
+    if enable[0] == "y":
+        return "true"
     else:
-        for idx, rdt_res in enumerate(rdt_resources):
-            if rdt_res == "L2":
-                rdt_res_str = "l2"
-                print("struct platform_clos_info platform_{0}_clos_array[{1}] = {{".format(rdt_res_str,
-                    "MAX_CACHE_CLOS_NUM_ENTRIES"), file=config)
-                populate_clos_mask_msr(rdt_res, cat_mask_list, config)
+        return "false"
+
+def get_cdp_enabled():
+    scenario_etree = lxml.etree.parse(common.SCENARIO_INFO_FILE)
+    enable = scenario_etree.xpath(f"//CDP_ENABLED/text()")
+    if enable[0] == "y":
+        return "true"
+    else:
+        return "false"
+
+def get_common_clos_max(clos_number, capability_id):
+
+    common_clos_max = 0
+    if get_rdt_enabled() == "true" and get_cdp_enabled() == "false":
+        common_clos_max = clos_number
+    if get_cdp_enabled() == "true" and capability_id != 'MBA':
+        common_clos_max = clos_number // 2
+
+    return common_clos_max
+
+def gen_rdt_str(cache, config):
+    err_dic = {}
+    cat_mask_list = {}
+
+    board_etree = lxml.etree.parse(common.BOARD_INFO_FILE)
+    mask_length = common.get_node(f"./capability[@id='CAT']/capacity_mask_length/text()", cache)
+    clos_number = common.get_node(f"./capability[@id='CAT']/clos_number/text()", cache)
+
+    bitmask = (1 << int(mask_length)) - 1
+    cache_level = common.get_node(f"./@level", cache)
+    cache_id = common.get_node(f"./@id", cache)
+    processor_list = board_etree.xpath(f"//cache[@level = '{cache_level}' and @id = '{cache_id}']/processors/processor/text()")
+    capability_list = board_etree.xpath(f"//cache[@level = '{cache_level}' and @id = '{cache_id}']/capability/@id")
+
+    for capability_id in capability_list:
+
+        common_clos_max = get_common_clos_max(int(clos_number), capability_id)
+        if capability_id == "CAT":
+            if common_clos_max > MSR_IA32_L2_MASK_END - MSR_IA32_L2_MASK_BASE or\
+                common_clos_max > MSR_IA32_L3_MASK_END - MSR_IA32_L3_MASK_BASE:
+                err_dic["board config: Failed to generate board.c"] = "The CLOS mask number is larger than the reserved address region length of the L2/L3 cache"
+                return err_dic
+
+            cdp_enable = get_cdp_enabled()
+            cat_mask_list = get_mask_list(cache_level, int(cache_id, 16))
+            if len(cat_mask_list) > int(clos_number):
+                err_dic['board config: Failed to generate board.c'] = "The CLOS mask number is larger than that supported by the L2/L3 cache"
+                return err_dic
+
+            if cache_level == "2":
+                rdt_res = "l2"
+            elif cache_level == "3":
+                rdt_res = "l3"
+
+            clos_config_array = "platform_l{0}_clos_array_{1}".format(cache_level, int(cache_id, 16))
+
+            print("\t{", file=config)
+            print("\t\t.res.cache = {", file=config)
+            print("\t\t\t.bitmask = {0},".format(hex(bitmask)), file=config)
+            print("\t\t\t.cbm_len = {0},".format(mask_length), file=config)
+            print("\t\t\t.is_cdp_enabled = {0},".format(cdp_enable), file=config)
+            print("\t\t},", file=config)
+        elif capability_id == "MBA":
+            max_throttling_value = common.get_node(f"./capability/max_throttling_value/text()", cache)
+            rdt_res = "mba"
+            clos_config_array = "platform_mba_clos_array"
+            print("\t{", file=config)
+            print("\t\t.res.membw = {", file=config)
+            print("\t\t\t.mba_max = {0},".format(clos_number), file=config)
+            print("\t\t\t.delay_linear = {0}".format(max_throttling_value), file=config)
+            print("\t\t},", file=config)
+
+        print("\t\t.num_closids = {0},".format(clos_number), file=config)
+        print("\t\t.num_clos_config = {0},".format(len(cat_mask_list)), file=config)
+        print("\t\t.clos_config_array = {0},".format(clos_config_array), file=config)
+
+        cpu_mask = 0
+        for processor in processor_list:
+            core_id = common.get_node(f"//core[@id = '{processor}']/thread/cpu_id/text()", board_etree)
+            if core_id is None:
+                continue
+            else:
+                cpu_mask = cpu_mask | (1 << int(core_id))
+        print("\t\t.cpu_mask = {0},".format(hex(cpu_mask)), file=config)
+        print("\t},", file=config)
+
+    return err_dic
+
+def get_mask_list(cache_level, cache_id):
+    allocation_dir = os.path.split(common.SCENARIO_INFO_FILE)[0] + "/configs/allocation.xml"
+    allocation_etree = lxml.etree.parse(allocation_dir)
+    if cache_level == "3":
+        clos_list = allocation_etree.xpath(f"//clos_mask[@id = 'l3']/clos/text()")
+    else:
+        clos_list = allocation_etree.xpath(f"//clos_mask[@id = '{cache_id}']/clos/text()")
+    return clos_list
+
+def gen_clos_array(cache_list, config):
+    err_dic = {}
+    res_present = [0, 0, 0]
+    if len(cache_list) == 0:
+        print("union clos_config platform_{0}_clos_array[MAX_CACHE_CLOS_NUM_ENTRIES];".format("l2"), file=config)
+        print("union clos_config platform_{0}_clos_array[MAX_CACHE_CLOS_NUM_ENTRIES];".format("l3"), file=config)
+        print("union clos_config platform_{0}_clos_array[MAX_MBA_CLOS_NUM_ENTRIES];".format("mba"), file=config)
+        print("struct rdt_info res_infos[RDT_INFO_NUMBER];", file=config)
+    else:
+        for idx, cache in enumerate(cache_list):
+            cache_level = common.get_node(f"./@level", cache)
+            cache_id = common.get_node(f"./@id", cache)
+            clos_number = common.get_node(f"./capability/clos_number/text()", cache)
+            if cache_level == "2":
+                cat_mask_list = get_mask_list(cache_level, int(cache_id, 16))
+                array_size = len(cat_mask_list)
+
+                print("union clos_config platform_l2_clos_array_{0}[{1}] = {{".format(int(cache_id, 16), clos_number), file=config)
+                populate_clos_mask_msr("L2", cat_mask_list, config)
                 print("};\n", file=config)
-                res_present[RDT.L2.value] = 1
-            elif rdt_res == "L3":
-                rdt_res_str = "l3"
-                print("struct platform_clos_info platform_{0}_clos_array[{1}] = {{".format(rdt_res_str,
-                    "MAX_CACHE_CLOS_NUM_ENTRIES"), file=config)
-                populate_clos_mask_msr(rdt_res, cat_mask_list, config)
+                res_present[RDT.L2.value] += 1
+            elif cache_level == "3":
+                cat_mask_list = get_mask_list(cache_level, int(cache_id, 16))
+
+                print("union clos_config platform_l3_clos_array_{0}[{1}] = {{".format(int(cache_id, 16), clos_number), file=config)
+                populate_clos_mask_msr("L3", cat_mask_list, config)
                 print("};\n", file=config)
-                res_present[RDT.L3.value] = 1
-            elif rdt_res == "MBA":
-                rdt_res_str = "mba"
-                print("struct platform_clos_info platform_{0}_clos_array[{1}] = {{".format(rdt_res_str,
-                    "MAX_MBA_CLOS_NUM_ENTRIES"), file=config)
-                err_dic = populate_mba_delay_mask(rdt_res, mba_delay_list, config)
+                res_present[RDT.L3.value] += 1
+            elif cache_level == "MBA":
+                print("union clos_config platform_mba_clos_array[MAX_MBA_CLOS_NUM_ENTRIES] = {", file=config)
+                err_dic = populate_mba_delay_mask("mba", mba_delay_list, config)
                 print("};\n", file=config)
                 res_present[RDT.MBA.value] = 1
             else:
@@ -177,18 +268,93 @@ def gen_rdt_res(config):
                 return err_dic
 
     if res_present[RDT.L2.value] == 0:
-        print("struct platform_clos_info platform_{0}_clos_array[{1}];".format("l2", "MAX_CACHE_CLOS_NUM_ENTRIES"), file=config)
+        print("union clos_config platform_l2_clos_array[MAX_CACHE_CLOS_NUM_ENTRIES];", file=config)
     if res_present[RDT.L3.value] == 0:
-        print("struct platform_clos_info platform_{0}_clos_array[{1}];".format("l3", "MAX_CACHE_CLOS_NUM_ENTRIES"), file=config)
+        print("union clos_config platform_l3_clos_array[MAX_CACHE_CLOS_NUM_ENTRIES];", file=config)
     if res_present[RDT.MBA.value] == 0:
-        print("struct platform_clos_info platform_{0}_clos_array[{1}];".format("mba", "MAX_MBA_CLOS_NUM_ENTRIES"), file=config)
+        print("union clos_config platform_mba_clos_array[MAX_MBA_CLOS_NUM_ENTRIES];", file=config)
+    return 0
 
-    print("#endif", file=config)
+def gen_rdt_res(config):
+    """
+    Get RDT resource (L2, L3, MBA) information
+    :param config: it is a file pointer of board information for writing to
+    """
+    print("\n#ifdef CONFIG_RDT_ENABLED", file=config)
+    err_dic = {}
+    res_present = [0, 0, 0]
+
+    scenario_etree = lxml.etree.parse(common.SCENARIO_INFO_FILE)
+    allocation_etree = lxml.etree.parse(common.SCENARIO_INFO_FILE)
+    board_etree = lxml.etree.parse(common.BOARD_INFO_FILE)
+
+    cache_list = board_etree.xpath(f"//cache[capability/@id = 'CAT' or capability/@id = 'MBA']")
+    gen_clos_array(cache_list, config)
+
+    cache_list = board_etree.xpath(f"//cache[capability/@id = 'CAT' and @level = '2']")
+    if len(cache_list) > 0:
+        res_present[RDT.L2.value] = len(cache_list)
+        rdt_ins_name = "rdt_ins_l2[" + str(len(cache_list)) + "] = {"
+        print("struct rdt_ins {}".format(rdt_ins_name), file=config)
+        for idx, cache in enumerate(cache_list):
+            err_dic = gen_rdt_str(cache, config)
+            if err_dic:
+                return err_dic
+        print("};\n", file=config)
+
+    cache_list = board_etree.xpath(f"//cache[capability/@id = 'CAT' and @level = '3']")
+    if len(cache_list) > 0:
+        res_present[RDT.L3.value] = len(cache_list)
+        rdt_ins_name = "rdt_ins_l3[" + str(len(cache_list)) + "] = {"
+        print("struct rdt_ins {}".format(rdt_ins_name), file=config)
+        for idx, cache in enumerate(cache_list):
+            err_dic = gen_rdt_str(cache, config)
+            if err_dic:
+                return err_dic
+        print("};\n", file=config)
+
+    cache_list = board_etree.xpath(f"//cache[capability/@id = 'MBA']")
+    if len(cache_list) > 0:
+        res_present[RDT.MBA.value] = len(cache_list)
+        rdt_ins_name = "rdt_ins_mba[" + str(len(cache_list)) + "] = {"
+        print("struct rdt_ins {}".format(rdt_ins_name), file=config)
+        for idx, cache in enumerate(cache_list):
+            err_dic = gen_rdt_str(cache, config)
+            if err_dic:
+                return err_dic
+        print("};\n", file=config)
+
+    print("struct rdt_type res_cap_info[RDT_NUM_RESOURCES] = {", file=config)
+    if res_present[RDT.L2.value] > 0:
+        print("\t{", file=config)
+        print("\t\t.res_id = RDT_RESID_L2,", file=config)
+        print("\t\t.msr_qos_cfg = MSR_IA32_L2_QOS_CFG,", file=config)
+        print("\t\t.msr_base = MSR_IA32_L2_MASK_BASE,", file=config)
+        print("\t\t.num_ins = {},".format(res_present[RDT.L2.value]), file=config)
+        print("\t\t.ins_array = rdt_ins_l2,", file=config)
+        print("\t},", file=config)
+    if res_present[RDT.L3.value] > 0:
+        print("\t{", file=config)
+        print("\t\t.res_id = RDT_RESID_L3,", file=config)
+        print("\t\t.msr_qos_cfg = MSR_IA32_L3_QOS_CFG,", file=config)
+        print("\t\t.msr_base = MSR_IA32_L3_MASK_BASE,", file=config)
+        print("\t\t.num_ins = {},".format(res_present[RDT.L3.value]), file=config)
+        print("\t\t.ins_array = rdt_ins_l3,", file=config)
+        print("\t},", file=config)
+    if res_present[RDT.MBA.value] > 0:
+        print("\t{", file=config)
+        print("\t\t.res_id = RDT_RESID_MBA,", file=config)
+        print("\t\t.msr_qos_cfg = MSR_IA32_MBA_QOS_CFG,", file=config)
+        print("\t\t.msr_base = MSR_IA32_MBA_MASK_BASE,", file=config)
+        print("\t\t.num_ins = {},".format(res_present[RDT.MBA.value]), file=config)
+        print("\t\t.ins_array = rdt_ins_mba,", file=config)
+        print("\t},", file=config)
+    print("};\n", file=config)
+
+    print("#endif\n", file=config)
 
-    print("", file=config)
     return err_dic
 
-
 def gen_single_data(data_lines, domain_str, config):
     line_i = 0
     data_statues = True
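For reference, the board XML layout this generator now consumes is easiest to see with a small,
self-contained sketch of the extraction done in gen_rdt_str(). The cache/core fragment below is
hypothetical sample data (a real board XML from the board inspector carries many more nodes); only
the XPath shapes mirror the code above.

# Minimal sketch of the gen_rdt_str() XPath extraction; the XML is invented for illustration.
import lxml.etree

BOARD_XML = """
<board>
  <caches>
    <cache level="3" id="0x0">
      <capability id="CAT">
        <capacity_mask_length>12</capacity_mask_length>
        <clos_number>16</clos_number>
      </capability>
      <processors>
        <processor>0x0</processor>
        <processor>0x1</processor>
      </processors>
    </cache>
  </caches>
  <processors>
    <core id="0x0"><thread><cpu_id>0</cpu_id></thread></core>
    <core id="0x1"><thread><cpu_id>1</cpu_id></thread></core>
  </processors>
</board>
"""

board_etree = lxml.etree.fromstring(BOARD_XML)
cache = board_etree.xpath("//cache[capability/@id = 'CAT']")[0]

# capacity_mask_length -> full-length capacity bitmask, as gen_rdt_str() computes it
mask_length = int(cache.xpath("./capability[@id='CAT']/capacity_mask_length/text()")[0])
bitmask = (1 << mask_length) - 1                  # 12 bits -> 0xfff

# processors sharing this cache -> cpu_mask, via each thread's cpu_id
cpu_mask = 0
for apic_id in cache.xpath("./processors/processor/text()"):
    cpu_id = board_etree.xpath(f"//core[@id = '{apic_id}']/thread/cpu_id/text()")
    if cpu_id:
        cpu_mask |= 1 << int(cpu_id[0])

print(hex(bitmask), hex(cpu_mask))                # 0xfff 0x3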
diff --git a/misc/config_tools/schema/config.xsd b/misc/config_tools/schema/config.xsd
index 4104a6ddc..53c249d44 100644
--- a/misc/config_tools/schema/config.xsd
+++ b/misc/config_tools/schema/config.xsd
@@ -277,6 +277,11 @@ These settings can only be changed at build time.
       Refer to :ref:`vuart_config` for detailed vUART settings.
+
+
+      Specify the cache setting.
+
+
diff --git a/misc/config_tools/schema/types.xsd b/misc/config_tools/schema/types.xsd
index 2dc286574..059fe887c 100644
--- a/misc/config_tools/schema/types.xsd
+++ b/misc/config_tools/schema/types.xsd
@@ -320,20 +320,6 @@ RDT, setting this option to ``y`` is ignored.
       Enable virtualization of the Cache Allocation Technology (CAT) feature in RDT. CAT enables you
       to allocate cache to VMs, providing isolation to avoid performance interference from other VMs.
-
-
-      Specify the cache capacity bitmask for the CLOS; only continuous '1' bits
-are allowed. The value will be ignored when hardware does not support RDT.
-This option takes effect only if :option:`hv.FEATURES.RDT.RDT_ENABLED` is set to ``y``.
-As :option:`vm.clos.vcpu_clos` specifies the index of the CLOS to be associated with the given vCPU,
-:option:`hv.FEATURES.RDT.CLOS_MASK` of that CLOS would impact the performance of the given vCPU.
-
-
-
-
-      Memory Bandwidth Allocation delay value.
-
-
@@ -347,4 +333,38 @@
+
+
+      Option: Unified, Code, Data
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
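For reference, here is what a scenario-XML cache-allocation section exercising the new schema might
look like. The element names (RDT_ENABLED, CDP_ENABLED, CACHE_ALLOCATION, CACHE_LEVEL, CACHE_ID,
POLICY, VM, VCPU, CLOS_MASK) are taken from the XPath queries in board_c.py and clos.py; the
CACHE_REGION wrapper and all mask values are invented for illustration, since the schema element
declarations themselves are not readable in this diff.

# Hypothetical scenario-XML fragment plus the kind of queries clos.py runs against it.
import lxml.etree

SCENARIO_XML = """
<acrn-config>
  <hv>
    <FEATURES>
      <RDT>
        <RDT_ENABLED>y</RDT_ENABLED>
        <CDP_ENABLED>n</CDP_ENABLED>
      </RDT>
      <CACHE_REGION>
        <CACHE_ALLOCATION>
          <CACHE_LEVEL>3</CACHE_LEVEL>
          <CACHE_ID>0x0</CACHE_ID>
          <POLICY><VM>VM0</VM><VCPU>0</VCPU><CLOS_MASK>0x0ff</CLOS_MASK></POLICY>
          <POLICY><VM>VM1</VM><VCPU>0</VCPU><CLOS_MASK>0xf00</CLOS_MASK></POLICY>
        </CACHE_ALLOCATION>
      </CACHE_REGION>
    </FEATURES>
  </hv>
</acrn-config>
"""

scenario_etree = lxml.etree.fromstring(SCENARIO_XML)
# Same query shapes as get_rdt_enabled() and gen_all_clos_index() use.
print(scenario_etree.xpath("//RDT_ENABLED/text()")[0])                                        # y
print(scenario_etree.xpath("//CACHE_ALLOCATION[CACHE_LEVEL = 3]/POLICY/CLOS_MASK/text()"))    # ['0x0ff', '0xf00']
print(scenario_etree.xpath("//CACHE_ALLOCATION[POLICY/VM = 'VM0']//POLICY[VM = 'VM0']/CLOS_MASK/text()"))  # ['0x0ff']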
diff --git a/misc/config_tools/static_allocators/clos.py b/misc/config_tools/static_allocators/clos.py
new file mode 100644
index 000000000..4f555f86a
--- /dev/null
+++ b/misc/config_tools/static_allocators/clos.py
@@ -0,0 +1,150 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2022 Intel Corporation.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'library'))
+import common
+import re
+from collections import defaultdict
+from itertools import combinations
+
+def create_clos_node(etree, vm_id, index_list):
+    allocation_vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", etree)
+    if allocation_vm_node is None:
+        allocation_vm_node = common.append_node("/acrn-config/vm", None, etree, id = vm_id)
+    if common.get_node("./clos", allocation_vm_node) is None:
+        clos_node = common.append_node("./clos", None, allocation_vm_node)
+        for index in index_list:
+            common.append_node(f"./vcpu_clos", str(index), clos_node)
+
+def find_cache2_id(mask, cache2_id_list):
+    for cache2 in cache2_id_list:
+        if mask[cache2] != "None":
+            return cache2
+    return "None"
+
+def merge_policy_list(mask_list, cache2_id_list):
+    index = 0
+    result_list = []
+    for index, mask in enumerate(mask_list):
+        merged = 0
+        if index == 0:
+            result_list.append(mask)
+            continue
+        for result in result_list:
+            if result["l3"] != mask["l3"]:
+                continue
+            else:
+                cache2_id = find_cache2_id(mask, cache2_id_list)
+                if cache2_id == "None" or result[cache2_id] == mask[cache2_id]:
+                    merged = 1
+                    break
+                if result[cache2_id] == "None":
+                    merged = 1
+                    result[cache2_id] = mask[cache2_id]
+                    break
+        if merged == 0:
+            result_list.append(mask)
+    return result_list
+
+def gen_all_clos_index(board_etree, scenario_etree, allocation_etree):
+    policy_list = []
+    allocation_list = scenario_etree.xpath(f"//POLICY")
+    cache2_id_list = scenario_etree.xpath("//CACHE_ALLOCATION[CACHE_LEVEL = 2]/CACHE_ID/text()")
+    cache2_id_list.sort()
+
+    for policy in allocation_list:
+        cache_level = common.get_node("../CACHE_LEVEL/text()", policy)
+        cache_id = common.get_node("../CACHE_ID/text()", policy)
+        vcpu = common.get_node("./VCPU/text()", policy)
+        mask = common.get_node("./CLOS_MASK/text()", policy)
+        tmp = (cache_level, cache_id, vcpu, mask)
+        policy_list.append(tmp)
+
+    vCPU_list = scenario_etree.xpath(f"//POLICY/VCPU/text()")
+    l3_mask_list = scenario_etree.xpath(f"//CACHE_ALLOCATION[CACHE_LEVEL = 3]/POLICY/CLOS_MASK")
+    mask_list = []
+    for vCPU in vCPU_list:
+        dict_tmp = {}
+        l3_mask = l2_mask = "None"
+        l3_mask_list = scenario_etree.xpath(f"//CACHE_ALLOCATION[CACHE_LEVEL = 3]/POLICY[VCPU = '{vCPU}']/CLOS_MASK/text()")
+        if len(l3_mask_list) > 0:
+            l3_mask = l3_mask_list[0]
+        dict_tmp["l3"] = l3_mask
+
+        l2_mask_list = scenario_etree.xpath(f"//CACHE_ALLOCATION[CACHE_LEVEL = 2]/POLICY[VCPU = '{vCPU}']/CLOS_MASK")
+        if len(l2_mask_list) > 0:
+            l2_mask = l2_mask_list[0].text
+            cache_id = scenario_etree.xpath(f"//CACHE_ALLOCATION[CACHE_LEVEL = 2 and POLICY/VCPU = '{vCPU}']/CACHE_ID/text()")[0]
+        for cache2 in cache2_id_list:
+            if cache2 == cache_id:
+                dict_tmp[cache_id] = l2_mask
+            else:
+                dict_tmp[cache2] = "None"
+        mask_list.append(dict_tmp)
+    mask_list = merge_policy_list(mask_list, cache2_id_list)
+    return mask_list
+
+def get_clos_index(cache_level, cache_id, clos_mask):
+    mask_list = common.get_mask_list(cache_level, cache_id)
+    idx = 0
+    for mask in mask_list:
+        idx += 1
+        if mask == clos_mask:
+            break
+    return idx
+
+def get_clos_id(mask_list, l2_id, l2_mask, l3_mask):
+    for mask in mask_list:
+        if mask[l2_id] == l2_mask and mask["l3"] == l3_mask:
+            return mask_list.index(mask)
+    return 0
+
+def alloc_clos_index(board_etree, scenario_etree, allocation_etree, mask_list):
+    vm_node_list = scenario_etree.xpath("//vm")
+    for vm_node in vm_node_list:
+        vmname = common.get_node("./name/text()", vm_node)
+        allocation_list = scenario_etree.xpath(f"//CACHE_ALLOCATION[POLICY/VM = '{vmname}']")
+        for allocation in allocation_list:
+            index_list = []
+            cache_level = common.get_node("./CACHE_LEVEL/text()", allocation)
+            cache_id = common.get_node("./CACHE_ID/text()", allocation)
+            clos_mask_list = allocation.xpath(f".//POLICY[VM = '{vmname}']/CLOS_MASK/text()")
+
+            for clos_mask in clos_mask_list:
+                index = get_clos_id(mask_list, cache_id, clos_mask, "None")
+                index_list.append(index)
+            create_clos_node(allocation_etree, common.get_node("./@id", vm_node), index_list)
+
+def creat_mask_list_node(board_etree, scenario_etree, allocation_etree, mask_list):
+    allocation_hv_node = common.get_node(f"//hv", allocation_etree)
+    if allocation_hv_node is None:
+        allocation_hv_node = common.append_node("//hv", None, allocation_etree)
+    cache2_id_list = scenario_etree.xpath("//CACHE_ALLOCATION[CACHE_LEVEL = 2]/CACHE_ID/text()")
+    cache2_id_list.sort()
+    if common.get_node("./clos_mask[@id = 'l3']", allocation_hv_node) is None:
+        clos_mask = common.append_node("./clos_mask", None, allocation_hv_node, id="l3")
+        for i in range(0, len(mask_list)):
+            if mask_list[i]["l3"] == "None":
+                value = "0xffff"
+            else:
+                value = str(mask_list[i]["l3"])
+            common.append_node(f"./clos", value, clos_mask)
+
+    for cache2 in cache2_id_list:
+        if common.get_node(f"./clos_mask[@id = '{cache2}']", allocation_hv_node) is None:
+            clos_mask = common.append_node("./clos_mask", None, allocation_hv_node, id=cache2)
+            for i in range(0, len(mask_list)):
+                if mask_list[i][cache2] == "None":
+                    value = "0xffff"
+                else:
+                    value = str(mask_list[i][cache2])
+                common.append_node(f"./clos", value, clos_mask)
+
+def fn(board_etree, scenario_etree, allocation_etree):
+    mask_list = gen_all_clos_index(board_etree, scenario_etree, allocation_etree)
+    creat_mask_list_node(board_etree, scenario_etree, allocation_etree, mask_list)
+    alloc_clos_index(board_etree, scenario_etree, allocation_etree, mask_list)
diff --git a/misc/config_tools/xforms/lib.xsl b/misc/config_tools/xforms/lib.xsl
index 6b3631d8c..0e28f89f1 100644
--- a/misc/config_tools/xforms/lib.xsl
+++ b/misc/config_tools/xforms/lib.xsl
@@ -564,12 +564,12 @@
-
+
-
+
diff --git a/misc/config_tools/xforms/misc_cfg.h.xsl b/misc/config_tools/xforms/misc_cfg.h.xsl
index 48faa8a39..64426b1c0 100644
--- a/misc/config_tools/xforms/misc_cfg.h.xsl
+++ b/misc/config_tools/xforms/misc_cfg.h.xsl
@@ -152,12 +152,6 @@
-
-
-
-
-
-
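The least obvious part of the new clos.py allocator is the policy-merging step, so here is a
standalone illustration driven by made-up mask values: two vCPU policies that share the same L3 mask
but touch different L2 cache instances collapse into a single CLOS entry, while a policy with a
different L3 mask keeps its own entry. find_cache2_id() and merge_policy_list() are copied (lightly
trimmed) from the allocator above.

# Standalone run of the clos.py merging step with invented masks and L2 cache IDs.
def find_cache2_id(mask, cache2_id_list):
    for cache2 in cache2_id_list:
        if mask[cache2] != "None":
            return cache2
    return "None"

def merge_policy_list(mask_list, cache2_id_list):
    result_list = []
    for index, mask in enumerate(mask_list):
        merged = 0
        if index == 0:
            result_list.append(mask)
            continue
        for result in result_list:
            if result["l3"] != mask["l3"]:
                continue
            cache2_id = find_cache2_id(mask, cache2_id_list)
            if cache2_id == "None" or result[cache2_id] == mask[cache2_id]:
                merged = 1
                break
            if result[cache2_id] == "None":
                merged = 1
                result[cache2_id] = mask[cache2_id]
                break
        if merged == 0:
            result_list.append(mask)
    return result_list

cache2_ids = ["0x8", "0x9"]                         # two L2 cache instances (hypothetical IDs)
policies = [
    {"l3": "0x0ff", "0x8": "0x3",  "0x9": "None"},  # vCPU0: L2 policy on cache 0x8
    {"l3": "0x0ff", "0x8": "None", "0x9": "0x3"},   # vCPU1: same L3 mask, L2 policy on cache 0x9
    {"l3": "0xf00", "0x8": "0xc",  "0x9": "None"},  # vCPU2: different L3 mask
]
merged = merge_policy_list(policies, cache2_ids)
print(len(merged))    # 2 -- vCPU0 and vCPU1 share one CLOS entry
print(merged[0])      # {'l3': '0x0ff', '0x8': '0x3', '0x9': '0x3'}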
diff --git a/misc/config_tools/xforms/vm_configurations.c.xsl b/misc/config_tools/xforms/vm_configurations.c.xsl
index 7a4dcb5d8..b16565961 100644
--- a/misc/config_tools/xforms/vm_configurations.c.xsl
+++ b/misc/config_tools/xforms/vm_configurations.c.xsl
@@ -68,8 +68,9 @@
-
-
+
+
+
 };
@@ -179,18 +180,17 @@
-
-
+
+
-
-
+
-
+
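Tying the two halves together: the allocator writes the merged masks into allocation.xml as
clos_mask/clos nodes (creat_mask_list_node() above), and get_mask_list() in board_c.py reads them
back when emitting the platform_*_clos_array initializers. Below is a minimal sketch of that
hand-off, using plain lxml instead of the common.append_node() helper, with invented mask values and
a single hypothetical L2 cache id.

# Sketch of the allocation.xml hand-off between clos.py (writer) and board_c.py (reader).
import lxml.etree

allocation_etree = lxml.etree.Element("acrn-config")
hv_node = lxml.etree.SubElement(allocation_etree, "hv")

# Writer side: one clos_mask node for L3 plus one per L2 cache instance.
for mask_id, masks in (("l3", ["0x0ff", "0xf00"]), ("0x8", ["0x3", "0xffff"])):
    clos_mask = lxml.etree.SubElement(hv_node, "clos_mask", id=mask_id)
    for value in masks:
        clos = lxml.etree.SubElement(clos_mask, "clos")
        clos.text = value

# Reader side: the same queries get_mask_list() issues against allocation.xml.
print(allocation_etree.xpath("//clos_mask[@id = 'l3']/clos/text()"))    # ['0x0ff', '0xf00']
print(allocation_etree.xpath("//clos_mask[@id = '0x8']/clos/text()"))   # ['0x3', '0xffff']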