diff --git a/misc/config_tools/board_inspector/acpiparser/aml/context.py b/misc/config_tools/board_inspector/acpiparser/aml/context.py index eb467b3ae..90fac29a3 100644 --- a/misc/config_tools/board_inspector/acpiparser/aml/context.py +++ b/misc/config_tools/board_inspector/acpiparser/aml/context.py @@ -253,7 +253,7 @@ class Context: elif new_tree.label == "DefExternal": pass else: - logging.warning(f"{symbol.name} is redefined as {new_tree.label} (previously was {old_tree.label})") + logging.debug(f"{symbol.name} is redefined as {new_tree.label} (previously was {old_tree.label})") self.__register_symbol(symbol) else: self.__register_symbol(symbol) diff --git a/misc/config_tools/board_inspector/acpiparser/aml/datatypes.py b/misc/config_tools/board_inspector/acpiparser/aml/datatypes.py index a69903a23..cec0e8d64 100644 --- a/misc/config_tools/board_inspector/acpiparser/aml/datatypes.py +++ b/misc/config_tools/board_inspector/acpiparser/aml/datatypes.py @@ -326,14 +326,14 @@ class OperationRegion(Object): if not cls.devmem: cls.devmem = open("/dev/mem", "rb", buffering=0) - logging.info(f"Open system memory space {name}: [{hex(offset)}, {hex(offset + length - 1)}]") + logging.debug(f"Open system memory space {name}: [{hex(offset)}, {hex(offset + length - 1)}]") offset_page_aligned = (offset >> 12) << 12 length_page_aligned = ceil(((offset & 0xFFF) + length) / 0x1000) * 0x1000 try: mm = mmap.mmap(cls.devmem.fileno(), length_page_aligned, flags=mmap.MAP_PRIVATE, prot=mmap.PROT_READ, offset=offset_page_aligned) except PermissionError as e: - logging.warning(f"Do not have permission to access [{hex(offset_page_aligned)}, {hex(offset_page_aligned + length_page_aligned)}] by /dev/mem.") - logging.warning(f"You may need to add `iomem=relaxed` to the Linux kernel command line in your bootloader configuration file.") + logging.debug(f"Do not have permission to access [{hex(offset_page_aligned)}, {hex(offset_page_aligned + length_page_aligned)}] by /dev/mem.") + logging.debug(f"You may need to add `iomem=relaxed` to the Linux kernel command line in your bootloader configuration file.") raise iobuf = StreamIOBuffer(mm, offset & 0xFFF, length) return OperationRegion(iobuf) @@ -343,7 +343,7 @@ class OperationRegion(Object): if not cls.devport: cls.devport = open("/dev/port", "w+b", buffering=0) - logging.info(f"Open system I/O space {name}: [{hex(offset)}, {hex(offset + length - 1)}]") + logging.debug(f"Open system I/O space {name}: [{hex(offset)}, {hex(offset + length - 1)}]") iobuf = StreamIOBuffer(cls.devport, offset, length) return OperationRegion(iobuf) @@ -360,14 +360,14 @@ class OperationRegion(Object): iobuf = StreamIOBuffer(f, offset, length) return OperationRegion(iobuf) except FileNotFoundError: - logging.warning(f"Cannot read the configuration space of %02x:%02x.%d from {sysfs_path}. Assume the PCI device does not exist." % (bus, device, function)) + logging.debug(f"Cannot read the configuration space of %02x:%02x.%d from {sysfs_path}. Assume the PCI device does not exist." 
% (bus, device, function)) data = bytearray([0xff]) * length buf = Buffer(data) return OperationRegion(buf) @classmethod def open_indexed_region(cls, index_register, data_register): - logging.info(f"Open I/O region indexed by index register {index_register.to_string()} and data register {data_register.to_string()}.") + logging.debug(f"Open I/O region indexed by index register {index_register.to_string()} and data register {data_register.to_string()}.") k = (str(index_register), str(data_register)) if k not in cls.opened_indexed_regions.keys(): iobuf = IndexedIOBuffer(index_register, data_register) @@ -400,9 +400,9 @@ class OperationRegion(Object): self.__iobuf.write_field(name, value) else: if isinstance(value, int): - logging.info(f"Skip writing 0x{value:0X} to I/O field {name}") + logging.debug(f"Skip writing 0x{value:0X} to I/O field {name}") else: - logging.info(f"Skip writing {value} to I/O field {name}") + logging.debug(f"Skip writing {value} to I/O field {name}") def set_field_writable(self, name): self.__writable_fields.add(name) diff --git a/misc/config_tools/board_inspector/acpiparser/aml/interpreter.py b/misc/config_tools/board_inspector/acpiparser/aml/interpreter.py index a3f901ec9..d16fdee88 100644 --- a/misc/config_tools/board_inspector/acpiparser/aml/interpreter.py +++ b/misc/config_tools/board_inspector/acpiparser/aml/interpreter.py @@ -302,7 +302,7 @@ class ConcreteInterpreter(Interpreter): return Device(sym) def DefExternal(self, tree): - logging.info(f"The loaded tables do not have a definition of {tree.children[0].value}") + logging.debug(f"The loaded tables do not have a definition of {tree.children[0].value}") return None def DefField(self, tree): @@ -502,7 +502,7 @@ class ConcreteInterpreter(Interpreter): if isinstance(ref, ObjectReference): return ref.get() else: - logging.warn(f"Attempt to dereference an object of type {ref.__class__.__name__}") + logging.debug(f"Attempt to dereference an object of type {ref.__class__.__name__}") return ref def DefDivide(self, tree): diff --git a/misc/config_tools/board_inspector/acpiparser/aml/parser.py b/misc/config_tools/board_inspector/acpiparser/aml/parser.py index bcc019910..488e3c422 100644 --- a/misc/config_tools/board_inspector/acpiparser/aml/parser.py +++ b/misc/config_tools/board_inspector/acpiparser/aml/parser.py @@ -28,14 +28,10 @@ class Factory: self.label = "unknown" def mark_begin(self): - if hasattr(self, "seq") and len(self.seq) > 1: - logging.debug(f"%s-> {self.label}" % (" " * self.level)) self.level += 1 def mark_end(self): self.level -= 1 - if hasattr(self, "seq") and len(self.seq) > 1: - logging.debug(f"%s<- {self.label}" % (" " * self.level)) def match(self, context, stream, tree): raise NotImplementedError @@ -431,7 +427,7 @@ class DeferredExpansion(Transformer): tree.factory = None tree.complete_parsing() except (DecodeError, DeferLater, ScopeMismatch, UndefinedSymbol) as e: - logging.info(f"expansion of {tree.label} at {hex(tree.deferred_range[0])} failed due to: " + str(e)) + logging.debug(f"expansion of {tree.label} at {hex(tree.deferred_range[0])} failed due to: " + str(e)) self.context.pop_scope() diff --git a/misc/config_tools/board_inspector/acpiparser/tpm2.py b/misc/config_tools/board_inspector/acpiparser/tpm2.py index a85614365..33f7e1d55 100644 --- a/misc/config_tools/board_inspector/acpiparser/tpm2.py +++ b/misc/config_tools/board_inspector/acpiparser/tpm2.py @@ -19,8 +19,8 @@ def tpm2_optional_data(data_len): has_log_area = True else: start_method_data_len = 12 - logging.warning(f"TPM2 data 
length: {data_len + 52} is greater than 64 bytes but less than 76 bytes.")
-        logging.warning(f"The TPM2 data is still processed but the 65 to {data_len + 52} bytes are discard.")
+        logging.debug(f"TPM2 data length: {data_len + 52} is greater than 64 bytes but less than 76 bytes.")
+        logging.debug(f"The TPM2 data is still processed but bytes 65 to {data_len + 52} are discarded.")
 
     return start_method_data_len, has_log_area
 
 def tpm2_factory(start_method_data_len, has_log_area):
diff --git a/misc/config_tools/board_inspector/board_inspector.py b/misc/config_tools/board_inspector/board_inspector.py
index b6cffb885..7bb1df984 100755
--- a/misc/config_tools/board_inspector/board_inspector.py
+++ b/misc/config_tools/board_inspector/board_inspector.py
@@ -20,6 +20,7 @@ def check_deps():
     # Check that the required tools are installed on the system
     BIN_LIST = ['cpuid', 'rdmsr', 'lspci', ' dmidecode', 'blkid', 'stty']
     cpuid_min_ver = 20170122
+    had_error = False
    for execute in BIN_LIST:
         res = subprocess.Popen("which {}".format(execute),
                                shell=True, stdout=subprocess.PIPE,
@@ -27,8 +28,8 @@ def check_deps():
         line = res.stdout.readline().decode('ascii')
 
         if not line:
-            logging.warning("'{}' cannot be found, please install it!".format(execute))
-            sys.exit(1)
+            logging.critical("'{}' cannot be found. Please install it and run the Board Inspector again.".format(execute))
+            had_error = True
 
         if execute == 'cpuid':
             res = subprocess.Popen("cpuid -v",
@@ -37,16 +38,20 @@ def check_deps():
             line = res.stdout.readline().decode('ascii')
             version = line.split()[2]
             if int(version) < cpuid_min_ver:
-                logging.warning("This tool requires CPUID version >= {}".format(cpuid_min_ver))
-                sys.exit(1)
+                logging.critical("This tool requires CPUID version >= {}. Try updating and upgrading the OS " \
+                    "on this system and rerunning the Board Inspector. If that fails, install a newer CPUID tool " \
+                    "from https://github.com/tycho/cpuid.".format(cpuid_min_ver))
+                had_error = True
+
+    if had_error:
+        sys.exit(1)
 
 def native_check():
     cpu_ids = get_online_cpu_ids()
     cpu_id = cpu_ids.pop(0)
     leaf_1 = parse_cpuid(1, 0, cpu_id)
     if leaf_1.hypervisor != 0:
-        logging.warning(f"Board inspector is running inside a Virtual Machine (VM). Running ACRN inside a VM is only" \
-            "supported under KVM/QEMU. Unexpected results may occur when deviating from that combination.")
+        logging.error("Board inspector is running inside an unsupported Virtual Machine (VM). " \
+            "Only KVM or QEMU is supported. 
Unexpected results may occur.") def bring_up_cores(): cpu_ids = get_offline_cpu_ids() @@ -100,10 +105,11 @@ def main(board_name, board_xml, args): # Finally overwrite the output with the updated XML board_etree.write(board_xml, pretty_print=True) - print("{} saved successfully!".format(board_xml)) + print("SUCCESS: Board configuration file {} generated successfully and saved to {}" \ + .format(board_xml, os.path.dirname(os.path.abspath(board_xml)))) except subprocess.CalledProcessError as e: - print(e) + logging.critical(e) sys.exit(1) if __name__ == "__main__": diff --git a/misc/config_tools/board_inspector/cpuparser/platformbase.py b/misc/config_tools/board_inspector/cpuparser/platformbase.py index 48ce3d027..154f66289 100644 --- a/misc/config_tools/board_inspector/cpuparser/platformbase.py +++ b/misc/config_tools/board_inspector/cpuparser/platformbase.py @@ -203,7 +203,9 @@ class msrfield(property): field = "[{0}]".format(msb) else: field = "[{0}:{1}]".format(msb, lsb) - raise OverflowError("Value {value:#x} too big for MSR {self.addr:#x} field {field}".format(**locals())) + raise OverflowError("Internal error: Value {value:#x} too big for MSR {self.addr:#x} field {field}. " \ + "Rerun the Board Inspector with `--loglevel debug`. If this issue persists," \ + "log a new issue at https://github.com/projectacrn/acrn-hypervisor/issues and attach the full logs.".format(**locals())) self.value = (self.value & ~field_mask) | (value << lsb) super(msrfield, self).__init__(getter, setter, doc=doc) diff --git a/misc/config_tools/board_inspector/extractors/20-cache.py b/misc/config_tools/board_inspector/extractors/20-cache.py index 0637a50eb..7d987a9a4 100644 --- a/misc/config_tools/board_inspector/extractors/20-cache.py +++ b/misc/config_tools/board_inspector/extractors/20-cache.py @@ -75,7 +75,7 @@ def extract_tcc_capabilities(caches_node): if entry.type == acpiparser.rtct.ACPI_RTCT_V1_TYPE_SoftwareSRAM: cache_node = get_node(caches_node, f"cache[@level='{entry.cache_level}' and processors/processor='{hex(entry.apic_id_tbl[0])}']") if cache_node is None: - logging.warning(f"Cannot find the level {entry.cache_level} cache of physical processor with apic ID {entry.apic_id_tbl[0]}") + logging.debug(f"Cannot find the level {entry.cache_level} cache of physical processor with apic ID {entry.apic_id_tbl[0]}") continue cap = add_child(cache_node, "capability", None, id="Software SRAM") add_child(cap, "start", "0x{:08x}".format(entry.base)) @@ -86,7 +86,7 @@ def extract_tcc_capabilities(caches_node): if entry.type == acpiparser.rtct.ACPI_RTCT_V2_TYPE_SoftwareSRAM: cache_node = get_node(caches_node, f"cache[@level='{entry.level}' and @id='{hex(entry.cache_id)}']") if cache_node is None: - logging.warning(f"Cannot find the level {entry.level} cache with cache ID {entry.cache_id}") + logging.debug(f"Cannot find the level {entry.level} cache with cache ID {entry.cache_id}") continue cap = add_child(cache_node, "capability", None, id="Software SRAM") add_child(cap, "start", "0x{:08x}".format(entry.base)) diff --git a/misc/config_tools/board_inspector/extractors/50-acpi-namespace.py b/misc/config_tools/board_inspector/extractors/50-acpi-namespace.py index 8f0750d32..af27bf80e 100644 --- a/misc/config_tools/board_inspector/extractors/50-acpi-namespace.py +++ b/misc/config_tools/board_inspector/extractors/50-acpi-namespace.py @@ -130,8 +130,8 @@ def parse_tpm(elem): add_child(log_area, "log_area_minimum_length", hex(tpm2.log_area_minimum_length)) add_child(log_area, "log_area_start_address", 
hex(tpm2.log_area_start_address)) except Exception as e: - logging.info(f"Parse ACPI TPM2 failed: {str(e)}") - logging.info(f"Will not extract information from ACPI TPM2") + logging.debug(f"Parse ACPI TPM2 failed: {str(e)}") + logging.debug(f"Will not extract information from ACPI TPM2") return resource_parsers = { @@ -338,7 +338,7 @@ def add_object_to_device(interpreter, device_path, obj_name, result): evaluated = (result != None) need_global = ("global" in deps.all.keys()) formatter = lambda x: '+' if x else '-' - logging.info(f"{device_path}.{obj_name}: Evaluated{formatter(evaluated)} Copy{formatter(copy_object)} NeedGlobal{formatter(need_global)}") + logging.debug(f"{device_path}.{obj_name}: Evaluated{formatter(evaluated)} Copy{formatter(copy_object)} NeedGlobal{formatter(need_global)}") if result == None or copy_object: if need_global: global_objs = ', '.join(map(lambda x: x.name, deps.all["global"])) @@ -402,7 +402,7 @@ def add_object_to_device(interpreter, device_path, obj_name, result): except: pass except NotImplementedError as e: - logging.info(f"{device_path}.{obj_name}: will not be added to vACPI, reason: {str(e)}") + logging.debug(f"{device_path}.{obj_name}: will not be added to vACPI, reason: {str(e)}") def fetch_device_info(devices_node, interpreter, namepath, args): logging.info(f"Fetch information about device object {namepath}") @@ -493,7 +493,7 @@ def fetch_device_info(devices_node, interpreter, namepath, args): if isinstance(adr, int): adr = hex(adr) if len(element.xpath(f"../*[@address='{adr}']")) > 0: - logging.info(f"{namepath} has siblings with duplicated address {adr}.") + logging.debug(f"{namepath} has siblings with duplicated address {adr}.") else: element.set("address", hex(adr) if isinstance(adr, int) else adr) add_object_to_device(interpreter, namepath, "_ADR", result) @@ -544,7 +544,7 @@ def fetch_device_info(devices_node, interpreter, namepath, args): elif isinstance(mapping.source, context.DeviceDecl): prt_info[mapping.address][mapping.pin] = (mapping.source.name, mapping.source_index) else: - logging.warning(f"The _PRT of {namepath} has a mapping with invalid source {mapping.source}") + logging.debug(f"The _PRT of {namepath} has a mapping with invalid source {mapping.source}") pin_routing_element = add_child(element, "interrupt_pin_routing") for address, pins in prt_info.items(): @@ -570,8 +570,8 @@ def extract(args, board_etree): try: namespace = parse_dsdt() except Exception as e: - logging.warning(f"Parse ACPI DSDT/SSDT failed: {str(e)}") - logging.warning(f"Will not extract information from ACPI DSDT/SSDT") + logging.debug(f"Parse ACPI DSDT/SSDT failed: {str(e)}") + logging.debug(f"Will not extract information from ACPI DSDT/SSDT") return interpreter = ConcreteInterpreter(namespace) @@ -587,7 +587,7 @@ def extract(args, board_etree): try: fetch_device_info(devices_node, interpreter, device.name, args) except Exception as e: - logging.info(f"Fetch information about device object {device.name} failed: {str(e)}") + logging.debug(f"Fetch information about device object {device.name} failed: {str(e)}") visitor = GenerateBinaryVisitor() for dev, objs in device_objects.items(): diff --git a/misc/config_tools/board_inspector/extractors/60-pci.py b/misc/config_tools/board_inspector/extractors/60-pci.py index 80563e5a2..3f223626f 100644 --- a/misc/config_tools/board_inspector/extractors/60-pci.py +++ b/misc/config_tools/board_inspector/extractors/60-pci.py @@ -107,7 +107,7 @@ def parse_device(bus_node, device_path): base = bar.base if 
os.path.exists(resource_path): if bar.base == 0: - logging.warning(f"PCI {device_name}: BAR {idx} exists but is programmed with all 0. This device cannot be passed through to any VM.") + logging.debug(f"PCI {device_name}: BAR {idx} exists but is programmed with all 0. This device cannot be passed through to any VM.") else: resource_node = get_node(device_node, f"./resource[@type = '{resource_type}' and @min = '{hex(base)}']") if resource_node is None: @@ -121,7 +121,7 @@ def parse_device(bus_node, device_path): resource_node.set("width", "64") resource_node.set("prefetchable", str(bar.prefetchable)) elif bar.base != 0: - logging.warning(f"PCI {device_name}: Cannot detect the size of BAR {idx}") + logging.debug(f"PCI {device_name}: Cannot detect the size of BAR {idx}") if isinstance(bar, MemoryBar64): idx += 2 else: diff --git a/misc/config_tools/board_inspector/extractors/helpers.py b/misc/config_tools/board_inspector/extractors/helpers.py index 30d195e90..ee3182760 100644 --- a/misc/config_tools/board_inspector/extractors/helpers.py +++ b/misc/config_tools/board_inspector/extractors/helpers.py @@ -15,5 +15,8 @@ def add_child(element, tag, text=None, **kwargs): def get_node(etree, xpath): result = etree.xpath(xpath) - assert len(result) <= 1, "Internal error: cannot get texts from multiple nodes at a time" + assert len(result) <= 1, \ + "Internal error: cannot get texts from multiple nodes at a time. " \ + "Rerun the Board Inspector with `--loglevel debug`. If this issue persists, " \ + "log a new issue at https://github.com/projectacrn/acrn-hypervisor/issues and attach the full logs." return result[0] if len(result) == 1 else None diff --git a/misc/config_tools/board_inspector/legacy/acpi.py b/misc/config_tools/board_inspector/legacy/acpi.py index abc5c1513..37a4d4915 100644 --- a/misc/config_tools/board_inspector/legacy/acpi.py +++ b/misc/config_tools/board_inspector/legacy/acpi.py @@ -10,7 +10,7 @@ import shutil from collections import defaultdict import dmar import parser_lib - +import logging SYS_PATH = ['/proc/cpuinfo', '/sys/firmware/acpi/tables/', '/sys/devices/system/cpu/'] @@ -438,17 +438,22 @@ def store_cx_data(sysnode1, sysnode2, config): idle_driver = acpi_idle.read(32) if idle_driver.find("acpi_idle") == -1: - parser_lib.print_yel("The Cx data for ACRN relies on " +\ - "acpi_idle driver but it is not found, ", warn=True, end=False) + logging.info("Failed to collect processor power states because the current CPU idle driver " \ + "does not expose C-state data. If you need ACPI C-states in post-launched VMs, append 'idle=nomwait' " \ + "to the kernel command line in GRUB config file.") if idle_driver.find("intel_idle") == 0: - print("please add idle=nomwait in kernel " +\ - "cmdline to fall back to acpi_idle driver") + logging.info("Failed to collect processor power states because the current CPU idle driver " \ + "does not expose C-state data. If you need ACPI C-states in post-launched VMs, append 'idle=nomwait' " \ + "to the kernel command line in GRUB config file.") else: - parser_lib.print_yel("please make sure ACPI Cstate is enabled in BIOS.", warn=True) + logging.info("Failed to collect processor power states because the platform does not provide " \ + "C-state data. 
If you need ACPI C-states in post-launched VMs, enable C-state support in the BIOS.")
         print("\t/* Cx data is not available */", file=config)
         return
     except IOError:
-        parser_lib.print_yel("No idle driver found.", warn=True)
+        logging.info("Failed to collect processor power states because CPU idle PM support is disabled " \
+            "in the current kernel. If you need ACPI C-states in post-launched VMs, rebuild the current kernel " \
+            "with CONFIG_CPU_IDLE set to 'y' or 'm'.")
         print("\t/* Cx data is not available */", file=config)
         return
 
@@ -512,17 +517,15 @@ def store_px_data(sysnode, config):
         with open(sysnode+'cpu0/cpufreq/scaling_driver', 'r') as f_node:
             freq_driver = f_node.read()
             if freq_driver.find("acpi-cpufreq") == -1:
-                parser_lib.print_yel("The Px data for ACRN relies on " +\
-                    "acpi-cpufreq driver but it is not found, ", warn=True, end=False)
+                logging.info("The Px data for ACRN relies on the acpi-cpufreq driver, but it is not in use.")
                 if freq_driver.find("intel_pstate") == 0:
-                    print("please add intel_pstate=disable in kernel " +\
-                        "cmdline to fall back to acpi-cpufreq driver")
+                    logging.info("Add 'intel_pstate=disable' to the kernel command line to fall back to the acpi-cpufreq driver.")
                 else:
-                    parser_lib.print_yel("please make sure ACPI Pstate is enabled in BIOS.", warn=True)
+                    logging.info("Enable ACPI P-state support in the BIOS.")
                 print("\t/* Px data is not available */", file=config)
                 return
     except IOError:
-        parser_lib.print_yel("No scaling_driver found.", warn=True)
+        logging.info("No scaling_driver found.")
         print("\t/* Px data is not available */", file=config)
         return
 
@@ -531,7 +534,7 @@ def store_px_data(sysnode, config):
             boost = f_node.read()
     except IOError:
         boost = 0
-        parser_lib.print_yel("CPU turbo is not enabled!")
+        logging.info("CPU turbo is not enabled. Enable CPU turbo in the BIOS if needed.")
 
     with open(sysnode + 'cpu0/cpufreq/scaling_available_frequencies', 'r') as f_node:
         freqs = f_node.read()
@@ -546,8 +549,8 @@ def store_px_data(sysnode, config):
     try:
         subprocess.check_call('/usr/sbin/rdmsr 0x1ad', shell=True, stdout=subprocess.PIPE)
     except subprocess.CalledProcessError:
-        parser_lib.print_red("MSR 0x1ad not support in this platform!", err=True)
-        sys.exit(1)
+        logging.debug("MSR 0x1ad is not supported on this platform.")
+        return
 
     res = subprocess.Popen('/usr/sbin/rdmsr 0x1ad', shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
@@ -583,8 +586,8 @@ def store_mmcfg_base_data(mmcfg_node, config):
         mmcfg_len_int = int.from_bytes(mmcfg_len_obj, 'little')
 
         if mmcfg_len_int > MCFG_ENTRY1_OFFSET:
-            parser_lib.print_red("Multiple PCI segment groups is not supported!", err=True)
-            sys.exit(1)
+            logging.debug("Multiple PCI segment groups are not supported.")
+            return
 
         mmcfg.seek(MCFG_ENTRY0_BASE_OFFSET, 0)
         mmcfg_base_addr_obj = mmcfg.read(DWORD_LEN)
@@ -658,7 +661,7 @@ def generate_info(board_file):
     out_dir = os.path.dirname(board_file)
     if os.path.isfile(SYS_PATH[1] + 'PTCT'):
         shutil.copy(SYS_PATH[1] + 'PTCT', out_dir if out_dir != "" else "./")
-        print("PTCT table has been saved to {} successfully!".format(os.path.join(out_dir, 'PTCT')))
+        logging.info("PTCT table has been saved to {} successfully!".format(os.path.join(out_dir, 'PTCT')))
 
     if os.path.isfile(SYS_PATH[1] + 'RTCT'):
         shutil.copy(SYS_PATH[1] + 'RTCT', out_dir if out_dir != "" else "./")
-        print("RTCT table has been saved to {} successfully!".format(os.path.join(out_dir, 'RTCT')))
+        logging.info("RTCT table has been saved to {} successfully!".format(os.path.join(out_dir, 'RTCT')))
diff --git a/misc/config_tools/board_inspector/legacy/board_parser.py 
b/misc/config_tools/board_inspector/legacy/board_parser.py index 93fa57880..24bd8697a 100755 --- a/misc/config_tools/board_inspector/legacy/board_parser.py +++ b/misc/config_tools/board_inspector/legacy/board_parser.py @@ -14,6 +14,7 @@ import acpi import clos import misc import parser_lib +import logging OUTPUT = "./out/" PY_CACHE = "__pycache__" @@ -23,7 +24,7 @@ CPU_VENDOR = "GenuineIntel" def check_permission(): """Check if it is root permission""" if os.getuid(): - parser_lib.print_red("You need run this tool with root privileges (sudo)!") + logging.critical("Run this tool with root privileges (sudo).") sys.exit(1) def vendor_check(): @@ -59,18 +60,19 @@ def check_env(): stderr=subprocess.PIPE, close_fds=True) err_msg = res.stderr.readline().decode('ascii') if err_msg: - parser_lib.print_red("{}".format(err_msg), err=True) + logging.critical("{}".format(err_msg)) exit(-1) msr_info = check_msr_files(cpu_dirs) if msr_info: for cpu_num in msr_info: - parser_lib.print_red("Missing CPU msr file in the {}/{}/".format(cpu_dirs, cpu_num), err=True) - parser_lib.print_red("Missing CPU msr file, please check the value of CONFIG_X86_MSR in the kernel config.", err=True) + logging.critical("Missing CPU MSR file at {}/{}/msr".format(cpu_dirs, cpu_num)) + logging.critical("Missing CPU MSR file /dev/cpu/#/msr. Check the value of CONFIG_X86_MSR in the kernel config." \ + " Set it to 'Y' and rebuild the OS. Then rerun the Board Inspector.") exit(-1) # check cpu vendor id if not vendor_check(): - parser_lib.print_red("Please run this tools on {}!".format(CPU_VENDOR)) + logging.critical(f"Unsupported processor {CPU_VENDOR} found. ACRN requires using a {CPU_VENDOR} processor.") sys.exit(1) if os.path.exists(OUTPUT): diff --git a/misc/config_tools/board_inspector/legacy/clos.py b/misc/config_tools/board_inspector/legacy/clos.py index 43b0db0bd..d2b117a0a 100644 --- a/misc/config_tools/board_inspector/legacy/clos.py +++ b/misc/config_tools/board_inspector/legacy/clos.py @@ -4,6 +4,7 @@ # import parser_lib +import logging RDT_TYPE = { "L2":4, @@ -64,7 +65,7 @@ def get_clos_info(): rdt_res = dump_cpuid_reg(cmd, "ebx") if len(rdt_res) == 0: - parser_lib.print_yel("Resource Allocation is not supported!") + logging.debug("Resource Allocation is not supported!") else: for i in range(len(rdt_res)): if rdt_res[i] == "L2": diff --git a/misc/config_tools/board_inspector/smbiosparser/smbios.py b/misc/config_tools/board_inspector/smbiosparser/smbios.py index b7303fb01..c0dd383e9 100644 --- a/misc/config_tools/board_inspector/smbiosparser/smbios.py +++ b/misc/config_tools/board_inspector/smbiosparser/smbios.py @@ -9,6 +9,7 @@ import sys import ctypes import struct import uuid +import logging import inspectorlib.bitfields as bitfields import inspectorlib.unpack as unpack @@ -48,7 +49,7 @@ class SMBIOS(unpack.Struct): matrix[types_present[index]] = [self.structures[index]] return matrix[num] except: - print("Failure: Type {} - not found".format(num)) + logging.debug("Failure: Type {} - not found".format(num)) class Header_2_1(unpack.Struct): def __new__(cls, u): @@ -171,7 +172,7 @@ class BIOSInformation(SmbiosBaseStructure): self.add_field('ec_minor_release', u.unpack_one("B")) except: self.decode_failure = True - print("Error parsing BIOSInformation") + logging.debug("Error parsing BIOSInformation") import traceback traceback.print_exc() self.fini() @@ -206,7 +207,7 @@ class SystemInformation(SmbiosBaseStructure): self.add_field('family', u.unpack_one("B"), self.fmtstr) except: self.decode_failure = True - print("Error 
parsing SystemInformation") + logging.debug("Error parsing SystemInformation") import traceback traceback.print_exc() self.fini() @@ -265,7 +266,7 @@ class BaseboardInformation(SmbiosBaseStructure): self.add_field('contained_object_handles', tuple(u.unpack_one(" 0x16: self.add_field('length_log_type_descriptor', u.unpack_one('B')) if self.length != (0x17 + (self.num_supported_log_type_descriptors * self.length_log_type_descriptor)): - print("Error: structure length ({}) != 0x17 + (num_supported_log_type_descriptors ({}) * length_log_type_descriptor({}))".format(self.length, self.num_supported_log_type_descriptors, self.length_log_type_descriptor)) - print("structure length = {}".format(self.length)) - print("num_supported_log_type_descriptors = {}".format(self.num_supported_log_type_descriptors)) - print("length_log_type_descriptor = {}".format(self.length_log_type_descriptor)) + logging.debug(f"Error: structure length ({self.length}) != 0x17 + (num_supported_log_type_descriptors ({self.num_supported_log_type_descriptors}) * length_log_type_descriptor({self.length_log_type_descriptor}))") + logging.debug("structure length = {}".format(self.length)) + logging.debug("num_supported_log_type_descriptors = {}".format(self.num_supported_log_type_descriptors)) + logging.debug("length_log_type_descriptor = {}".format(self.length_log_type_descriptor)) self.decodeFailure = True self.add_field('descriptors', tuple(EventLogDescriptor.unpack(u) for i in range(self.num_supported_log_type_descriptors)), unpack.format_each("\n{!r}")) except: self.decodeFailure = True - print("Error parsing SystemEventLog") + logging.debug("Error parsing SystemEventLog") import traceback traceback.print_exc() self.fini() @@ -1112,7 +1113,7 @@ class PhysicalMemoryArray(SmbiosBaseStructure): self.add_field('extended_maximum_capacity', u.unpack_one('
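
Note on verbosity (not part of the patch above): most messages touched here are demoted from logging.warning/logging.info to logging.debug, and the new user-facing messages tell users to rerun the Board Inspector with `--loglevel debug`. The snippet below is a minimal, hypothetical sketch of how such a --loglevel option can be mapped onto Python's logging module; the actual option handling in board_inspector.py is not shown in this diff, so the names here are illustrative only.

# Illustrative sketch only -- not part of the patch. It assumes a CLI entry
# point similar to board_inspector.py and shows how a --loglevel option can be
# mapped to Python's logging module so that messages demoted to logging.debug
# above become visible again on request.
import argparse
import logging
import sys

def init_logging(loglevel_name):
    # Map "debug"/"info"/"warning"/"error"/"critical" to the numeric logging levels.
    level = getattr(logging, loglevel_name.upper(), None)
    if not isinstance(level, int):
        sys.exit("Invalid log level: {}".format(loglevel_name))
    logging.basicConfig(level=level, format="%(levelname)s: %(message)s")

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--loglevel", default="warning",
                        help="debug, info, warning, error, or critical")
    args = parser.parse_args()
    init_logging(args.loglevel)
    logging.debug("Shown only when run with --loglevel debug")
    logging.critical("Always shown: fatal problems are reported with logging.critical")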