acrn-hypervisor/misc/acrn-config/xmls/config-xmls/apl-up2/logical_partition.xml
Wei Liu b4a61abe45 acrn-config: add hv configurations to scenario config xmls
Move the board defconfig settings into the scenario config XMLs so that users can configure them from the web UI.

Tracked-On: #4634
Signed-off-by: Wei Liu <weix.w.liu@intel.com>
Acked-by: Victor Sun <victor.sun@intel.com>
Acked-by: Terry Zou <terry.zou@intel.com>
2020-04-16 08:50:31 +08:00


<acrn-config board="apl-up2" scenario="logical_partition">
<hv>
<DEBUG_OPTIONS desc="Debug options for ACRN hypervisor, only valid on debug version">
<RELEASE desc="Release build. 'y' for Release, 'n' for Debug.">n</RELEASE>
<SERIAL_CONSOLE configurable="0" desc="The serial device used for hypervisor debugging; only valid on the Debug version.">/dev/ttyS0</SERIAL_CONSOLE>
<MEM_LOGLEVEL desc="Default loglevel in memory">5</MEM_LOGLEVEL>
<NPK_LOGLEVEL desc="Default loglevel for the hypervisor NPK log">5</NPK_LOGLEVEL>
<CONSOLE_LOGLEVEL desc="Default loglevel on the serial console">3</CONSOLE_LOGLEVEL>
<LOG_DESTINATION desc="Bitmap of consoles where logs are printed.">7</LOG_DESTINATION>
<LOG_BUF_SIZE desc="Capacity of the log buffer for each physical CPU.">0x40000</LOG_BUF_SIZE>
</DEBUG_OPTIONS>
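<!-- Added note (not part of the upstream file): LOG_DESTINATION is a bitmap of log sinks; assuming the usual ACRN mapping of bit 0 = serial console, bit 1 = memory buffer and bit 2 = NPK, the value 7 enables all three. A message reaches a sink only if it passes that sink's *_LOGLEVEL threshold above. -->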
<FEATURES>
<RELOC desc="Enable hypervisor relocation">y</RELOC>
<SCHEDULER desc="The CPU scheduler to be used by the hypervisor.">SCHED_NOOP</SCHEDULER>
<MULTIBOOT2 desc="Support booting ACRN via the multiboot2 protocol.">y</MULTIBOOT2>
<HYPERV_ENABLED desc="Enable Hyper-V enlightenment">y</HYPERV_ENABLED>
<IOMMU_ENFORCE_SNP desc="Enforce IOMMU snoop behavior for DMA operations.">n</IOMMU_ENFORCE_SNP>
<ACPI_PARSE_ENABLED desc="Enable ACPI runtime parsing.">y</ACPI_PARSE_ENABLED>
<L1D_VMENTRY_ENABLED desc="Enable L1 cache flush before VM entry.">n</L1D_VMENTRY_ENABLED>
<MCE_ON_PSC_DISABLED desc="Force-disable the software workaround for Machine Check Error on Page Size Change.">n</MCE_ON_PSC_DISABLED>
</FEATURES>
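<!-- Added note (not part of the upstream file): SCHED_NOOP runs at most one vCPU per pCPU with no time-sharing, which is the scheduler expected for this fully partitioned logical_partition scenario. -->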
<MEMORY>
<STACK_SIZE desc="Capacity of one stack, in bytes.">0x2000</STACK_SIZE>
<HV_RAM_SIZE desc="Size of the RAM region used by the hypervisor">0x0b800000</HV_RAM_SIZE>
<HV_RAM_START desc="2M-aligned start physical address of the RAM region used by the hypervisor.">0x00400000</HV_RAM_START>
<LOW_RAM_SIZE desc="Size of the low RAM region">0x00010000</LOW_RAM_SIZE>
<UOS_RAM_SIZE desc="Size of the User OS (UOS) RAM.">0x200000000</UOS_RAM_SIZE>
<SOS_RAM_SIZE desc="Size of the Service OS (SOS) RAM.">0x400000000</SOS_RAM_SIZE>
<PLATFORM_RAM_SIZE desc="Size of the physical platform RAM">0x400000000</PLATFORM_RAM_SIZE>
</MEMORY>
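<!-- Added note (not part of the upstream file): the hypervisor's own RAM region spans HV_RAM_START to HV_RAM_START + HV_RAM_SIZE (0x00400000 to 0x0bc00000 here). UOS_RAM_SIZE, SOS_RAM_SIZE and PLATFORM_RAM_SIZE are upper bounds used to size internal structures such as pre-allocated EPT page-table pools, not actual per-VM allocations; the real guest regions are defined in each VM's memory section below. -->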
<CAPACITIES desc="Capacity limits for statically assigned data structures or maximum supported resources">
<IOMMU_BUS_NUM desc="Highest PCI bus ID used during IOMMU initialization.">0x100</IOMMU_BUS_NUM>
<MAX_IR_ENTRIES desc="Maximum number of Interrupt Remapping Entries.">256</MAX_IR_ENTRIES>
<MAX_IOAPIC_NUM desc="Maximum number of IO-APICs.">1</MAX_IOAPIC_NUM>
<MAX_KATA_VM_NUM desc="Maximum number of Kata Containers in SOS.">0</MAX_KATA_VM_NUM>
<MAX_PCI_DEV_NUM desc="Maximum number of PCI devices.">96</MAX_PCI_DEV_NUM>
<MAX_IOAPIC_LINES desc="Maximum number of interrupt lines per IOAPIC.">120</MAX_IOAPIC_LINES>
<MAX_PT_IRQ_ENTRIES desc="Maximum number of interrupt sources for passthrough (PT) devices.">64</MAX_PT_IRQ_ENTRIES>
<MAX_MSIX_TABLE_NUM desc="Maximum number of MSI-X tables per device.">16</MAX_MSIX_TABLE_NUM>
<MAX_EMULATED_MMIO desc="Maximum number of emulated MMIO regions.">16</MAX_EMULATED_MMIO>
</CAPACITIES>
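<!-- Added note (not part of the upstream file): these limits size statically allocated hypervisor data structures, so raising them increases the hypervisor's memory footprint; for example, IOMMU_BUS_NUM 0x100 (256) covers PCI buses 0x00 through 0xFF. -->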
<MISC_CFG>
<GPU_SBDF desc="Segment, Bus, Device, and Function (SBDF) of the GPU.">0x00000010</GPU_SBDF>
<UEFI_OS_LOADER_NAME desc="UEFI OS loader name.">\\EFI\\org.clearlinux\\bootloaderx64.efi</UEFI_OS_LOADER_NAME>
</MISC_CFG>
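<!-- Added note (not part of the upstream file): assuming the usual SBDF packing of segment<<16 | bus<<8 | device<<3 | function, 0x00000010 decodes to 0000:00:02.0, the typical address of the integrated GPU on Apollo Lake boards such as the UP2. -->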
</hv>
<vm id="0">
<load_order desc="Specify the VM by its load order: PRE_LAUNCHED_VM, SOS_VM or POST_LAUNCHED_VM." readonly="true">PRE_LAUNCHED_VM</load_order>
<name desc="Specify the VM name shown by the hypervisor console command vm_list.">ACRN PRE-LAUNCHED VM0</name>
<uuid configurable="0" desc="vm uuid">26c5e0d8-8f8a-47d8-8109-f201ebd61a5e</uuid>
<guest_flags desc="Select all applicable flags for the VM" multiselect="true">
<guest_flag></guest_flag>
<guest_flag></guest_flag>
</guest_flags>
<vcpu_affinity desc="vCPU affinity map. Each vCPU is pinned to the selected pCPU ID. Please make sure each vCPU is pinned to a different pCPU.">
<pcpu_id>0</pcpu_id>
<pcpu_id>2</pcpu_id>
</vcpu_affinity>
<clos configurable="0" desc="Class of Service for Cache Allocation Technology. Please refer to SDM 17.19.2 for details and use with caution.">
<vcpu_clos>0</vcpu_clos>
</clos>
<epc_section desc="SGX Enclave Page Cache (EPC) section">
<base desc="SGX EPC section base, must be page aligned">0</base>
<size desc="SGX EPC section size in Bytes, must be page aligned">0</size>
</epc_section>
<memory>
<start_hpa desc="The start physical address in host for the VM">0x100000000</start_hpa>
<size desc="The memory size in Bytes for the VM">0x20000000</size>
<start_hpa2 desc="Start of second HPA for non-contiguous allocations in host for the VM">0x0</start_hpa2>
<size_hpa2 desc="Memory size of second HPA for non-contiguous allocations in Bytes for the VM">0x0</size_hpa2>
</memory>
<os_config>
<name desc="Specify the OS name of VM, currently it is not referenced by hypervisor code.">ClearLinux</name>
<kern_type desc="Specify the kernel image type so that the hypervisor can load it correctly. KERNEL_BZIMAGE and KERNEL_ZEPHYR are currently supported.">KERNEL_BZIMAGE</kern_type>
<kern_mod desc="The tag for the kernel image, which acts as a multiboot module; it must exactly match the module tag in the GRUB multiboot cmdline.">Linux_bzImage</kern_mod>
<ramdisk_mod desc="The tag for the ramdisk image, which acts as a multiboot module; it must exactly match the module tag in the GRUB multiboot cmdline."></ramdisk_mod>
<rootfs desc="rootfs for Linux kernel">/dev/sda3</rootfs>
<bootargs desc="Specify kernel boot arguments">
rw rootwait noxsave nohpet no_timer_check ignore_loglevel log_buf_len=16M consoleblank=0 tsc=reliable
</bootargs>
</os_config>
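<!-- Illustrative sketch (not part of the upstream file; paths are hypothetical): the kern_mod tag above must match the module tag in the GRUB multiboot2 entry, e.g. a grub.cfg menu entry containing:
multiboot2 /boot/acrn.bin
module2 /boot/bzImage_vm0 Linux_bzImage
where "Linux_bzImage" is the tag the hypervisor looks up and /boot/bzImage_vm0 is a placeholder kernel image path. -->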
<vuart id="0">
<type configurable="0" desc="vCOM1 type">VUART_LEGACY_PIO</type>
<base desc="vUART0 (a.k.a. COM1) enabling switch. Enable by exposing its base address; disable by setting an invalid base address." readonly="true">COM1_BASE</base>
<irq configurable="0" desc="vCOM1 irq">COM1_IRQ</irq>
</vuart>
<vuart id="1">
<type configurable="0" desc="vCOM2 type">VUART_LEGACY_PIO</type>
<base desc="vUART1 (a.k.a. COM2) enabling switch. Enable by exposing its base address; disable by setting an invalid base address.">COM2_BASE</base>
<irq configurable="0" desc="vCOM2 irq">COM2_IRQ</irq>
<target_vm_id desc="COM2 is used for VM communications. When it is enabled, please specify which target VM the current VM connects to.">1</target_vm_id>
<target_uart_id configurable="0" desc="target vUART ID that vCOM2 connects to">1</target_uart_id>
</vuart>
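<!-- Added note (not part of the upstream file): this vCOM2 is wired to VM1 (target_vm_id 1, target_uart_id 1), and VM1's vCOM2 below points back to VM0, giving the two pre-launched VMs a point-to-point serial channel for inter-VM communication. -->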
<pci_dev_num configurable="0" desc="Number of PCI devices">VM0_CONFIG_PCI_DEV_NUM</pci_dev_num>
<pci_devs configurable="0" desc="List of PCI devices">vm0_pci_devs</pci_devs>
</vm>
<vm id="1">
<load_order desc="Specify the VM by its load order: PRE_LAUNCHED_VM, SOS_VM or POST_LAUNCHED_VM." readonly="true">PRE_LAUNCHED_VM</load_order>
<name desc="Specify the VM name shown by the hypervisor console command vm_list.">ACRN PRE-LAUNCHED VM1</name>
<uuid configurable="0" desc="vm uuid">dd87ce08-66f9-473d-bc58-7605837f935e</uuid>
<guest_flags desc="Select all applicable flags for the VM" multiselect="true">
<guest_flag>GUEST_FLAG_RT</guest_flag>
<guest_flag>GUEST_FLAG_LAPIC_PASSTHROUGH</guest_flag>
</guest_flags>
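<!-- Added note (not part of the upstream file): GUEST_FLAG_RT marks this VM as a real-time guest and GUEST_FLAG_LAPIC_PASSTHROUGH passes the local APIC through to it, minimizing hypervisor-induced jitter on its cores. -->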
<vcpu_affinity desc="vCPU affinity map. Each vCPU is pinned to the selected pCPU ID. Please make sure each vCPU is pinned to a different pCPU.">
<pcpu_id>1</pcpu_id>
<pcpu_id>3</pcpu_id>
</vcpu_affinity>
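<!-- Added note (not part of the upstream file): VM0 is pinned to pCPUs 0 and 2 and VM1 to pCPUs 1 and 3, so the physical cores are statically partitioned between the two VMs with no sharing, consistent with SCHED_NOOP above. -->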
<clos configurable="0" desc="Class of Service for Cache Allocation Technology. Please refer to SDM 17.19.2 for details and use with caution.">
<vcpu_clos>0</vcpu_clos>
</clos>
<epc_section desc="SGX Enclave Page Cache (EPC) section">
<base desc="SGX EPC section base, must be page aligned">0</base>
<size desc="SGX EPC section size in Bytes, must be page aligned">0</size>
</epc_section>
<memory>
<start_hpa configurable="0" desc="The start physical address in host for the VM">0x120000000</start_hpa>
<size desc="The memory size in Bytes for the VM">0x20000000</size>
<start_hpa2 desc="Start of second HPA for non-contiguous allocations in host for the VM">0x0</start_hpa2>
<size_hpa2 desc="Memory size of second HPA for non-contiguous allocations in Bytes for the VM">0x0</size_hpa2>
</memory>
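<!-- Added note (not part of the upstream file): VM0 occupies host physical addresses 0x100000000 to 0x120000000 (512 MB) and VM1 occupies 0x120000000 to 0x140000000 (512 MB), so the two guest regions sit back to back above 4 GB without overlapping. -->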
<os_config>
<name desc="Specify the OS name of VM, currently it is not referenced by hypervisor code.">ClearLinux</name>
<kern_type desc="Specify the kernel image type so that the hypervisor can load it correctly. KERNEL_BZIMAGE and KERNEL_ZEPHYR are currently supported.">KERNEL_BZIMAGE</kern_type>
<kern_mod desc="The tag for the kernel image, which acts as a multiboot module; it must exactly match the module tag in the GRUB multiboot cmdline.">Linux_bzImage</kern_mod>
<ramdisk_mod desc="The tag for the ramdisk image, which acts as a multiboot module; it must exactly match the module tag in the GRUB multiboot cmdline."></ramdisk_mod>
<rootfs desc="rootfs for Linux kernel" readonly="true">/dev/sda3</rootfs>
<bootargs desc="Specify kernel boot arguments">
rw rootwait noxsave nohpet no_timer_check ignore_loglevel log_buf_len=16M
consoleblank=0 tsc=reliable
</bootargs>
</os_config>
<vuart id="0">
<type configurable="0" desc="vCOM1 type">VUART_LEGACY_PIO</type>
<base desc="vUART0 (a.k.a. COM1) enabling switch. Enable by exposing its base address; disable by setting an invalid base address." readonly="true">COM1_BASE</base>
<irq configurable="0" desc="vCOM1 irq">COM1_IRQ</irq>
</vuart>
<vuart id="1">
<type configurable="0" desc="vCOM2 type">VUART_LEGACY_PIO</type>
<base desc="vUART1 (a.k.a. COM2) enabling switch. Enable by exposing its base address; disable by setting an invalid base address.">COM2_BASE</base>
<irq configurable="0" desc="vCOM2 irq">COM2_IRQ</irq>
<target_vm_id desc="COM2 is used for VM communications. When it is enabled, please specify which target VM the current VM connects to.">0</target_vm_id>
<target_uart_id configurable="0" desc="target vUART ID that vCOM2 connects to">1</target_uart_id>
</vuart>
<pci_dev_num configurable="0" desc="Number of PCI devices">VM1_CONFIG_PCI_DEV_NUM</pci_dev_num>
<pci_devs configurable="0" desc="List of PCI devices">vm1_pci_devs</pci_devs>
</vm>
</acrn-config>