HV: nuc7i7dnb example of new VM configurations layout

There are three kinds of configurations in the ACRN hypervisor source code:
hypervisor overall settings, per-board settings, and scenario-specific per-VM
settings. Currently, Kconfig acts as the hypervisor overall setting and its
source is located at "hypervisor/arch/x86/configs/$(BOARD).config"; per-board
configs are located in the "hypervisor/arch/x86/configs/$(BOARD)" folder; and
scenario-specific per-VM configs are located in the
"hypervisor/scenarios/$(SCENARIO)" folder.

This layout has the problem that board configs and VM configs are tightly
coupled: the board-specific Kconfig file and misc_cfg.h are shared by all
scenarios, and the scenario-specific pci_dev.c is shared by all boards, so
the user has no way to build hypervisor binaries for different scenarios on
different boards from a single source code repo.

This patch sets up a new VM configurations layout as below:

  misc/vm_configs
  ├── boards                         --> folder of supported boards
  │   ├── <board_1>                  --> scenario-irrelevant board configs
  │   │   ├── board.c                --> C file of board configs
  │   │   ├── board_info.h           --> H file of board info
  │   │   ├── pci_devices.h          --> pBDF of PCI devices
  │   │   └── platform_acpi_info.h   --> native ACPI info
  │   ├── <board_2>
  │   ├── <board_3>
  │   └── <board...>
  └── scenarios                      --> folder of supported scenarios
      ├── <scenario_1>               --> scenario specific VM configs
      │   ├── <board_1>              --> board specific VM configs for <scenario_1>
      │   │   ├── <board_1>.config   --> Kconfig for specific scenario on specific board
      │   │   ├── misc_cfg.h         --> H file of board specific VM configs
      │   │   ├── pci_dev.c          --> board specific VM pci devices list
      │   │   └── vbar_base.h        --> vBAR base info of VM PT pci devices
      │   ├── <board_2>
      │   ├── <board_3>
      │   ├── <board...>
      │   ├── vm_configurations.c    --> C file of scenario specific VM configs
      │   └── vm_configurations.h    --> H file of scenario specific VM configs
      ├── <scenario_2>
      ├── <scenario_3>
      └── <scenario...>

The new layout decouples board configs and VM configs completely:

The boards folder stores the info of the supported boards. Each board folder
stores only scenario-irrelevant board configs, which can be obtained entirely
from the physical platform and work for all scenarios.

The scenarios folder stores the VM configs of each supported working
scenario. In each scenario folder, besides the generic scenario-specific VM
configs, the board-specific VM configs are placed in an embedded board
folder, as sketched below.
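
To illustrate the split, below is a minimal hand-written sketch (not the
actual generated files; the SATA_CONTROLLER_0_VBAR macro name is an
illustrative assumption) of how a scenario's board-specific pci_dev.c could
combine the physical BDF provided by the board folder with the vBAR base
chosen for this scenario:

  /* boards/<board>/pci_devices.h: scenario-irrelevant physical BDF only */
  #define SATA_CONTROLLER_0      .pbdf.bits = {.b = 0x00U, .d = 0x17U, .f = 0x00U}

  /* scenarios/<scenario>/<board>/vbar_base.h: vBAR base chosen for this scenario */
  #define SATA_CONTROLLER_0_VBAR .vbar_base[0] = PTDEV_HI_MMIO_START + 0x200000UL

  /* scenarios/<scenario>/<board>/pci_dev.c: assign the device to a VM */
  struct acrn_vm_pci_dev_config vm0_pci_devs[VM0_CONFIG_PCI_DEV_NUM] = {
          {
                  .emu_type = PCI_DEV_TYPE_PTDEV,
                  .vbdf.bits = {.b = 0x00U, .d = 0x01U, .f = 0x00U},
                  SATA_CONTROLLER_0,
                  SATA_CONTROLLER_0_VBAR
          },
  };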

In the new layout, all config files are moved out of the hypervisor folder
into a separate folder. This makes the hypervisor LoC calculation more
precise, using the formula below:
	typical LoC = LoC(hypervisor) + LoC(one vm_configs)
where
	LoC(one vm_configs) = LoC(misc/vm_configs/boards/<board>)
		+ LoC(misc/vm_configs/scenarios/<scenario>/<board>)
		+ LoC(misc/vm_configs/scenarios/<scenario>/vm_configurations.c)
		+ LoC(misc/vm_configs/scenarios/<scenario>/vm_configurations.h)

Tracked-On: #5077

Signed-off-by: Victor Sun <victor.sun@intel.com>
Reviewed-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Author:    Victor Sun
Date:      2020-07-24 09:23:53 +08:00
Committed: wenlingz
Commit:    e792fa3d3c (parent 4ffa6cc7b1)
22 changed files with 124 additions and 144 deletions

@@ -1,5 +0,0 @@
# Generated by Kconfiglib (https://github.com/ulfalizer/Kconfiglib)
CONFIG_BOARD="nuc7i7dnb"
CONFIG_SERIAL_LEGACY=y
CONFIG_HV_RAM_START=0x41000000
CONFIG_RDT_ENABLED=n

@@ -1,67 +0,0 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <board.h>
#include <vtd.h>
#include <pci.h>
static struct dmar_dev_scope drhd0_dev_scope[DRHD0_DEV_CNT] = {
{
.type = DRHD0_DEVSCOPE0_TYPE,
.id = DRHD0_DEVSCOPE0_ID,
.bus = DRHD0_DEVSCOPE0_BUS,
.devfun = DRHD0_DEVSCOPE0_PATH
},
};
static struct dmar_dev_scope drhd1_dev_scope[DRHD1_DEV_CNT] = {
{
.type = DRHD1_DEVSCOPE0_TYPE,
.id = DRHD1_DEVSCOPE0_ID,
.bus = DRHD1_DEVSCOPE0_BUS,
.devfun = DRHD1_DEVSCOPE0_PATH
},
{
.type = DRHD1_DEVSCOPE1_TYPE,
.id = DRHD1_DEVSCOPE1_ID,
.bus = DRHD1_DEVSCOPE1_BUS,
.devfun = DRHD1_DEVSCOPE1_PATH
},
};
static struct dmar_drhd drhd_info_array[DRHD_COUNT] = {
{
.dev_cnt = DRHD0_DEV_CNT,
.segment = DRHD0_SEGMENT,
.flags = DRHD0_FLAGS,
.reg_base_addr = DRHD0_REG_BASE,
.ignore = DRHD0_IGNORE,
.devices = drhd0_dev_scope
},
{
.dev_cnt = DRHD1_DEV_CNT,
.segment = DRHD1_SEGMENT,
.flags = DRHD1_FLAGS,
.reg_base_addr = DRHD1_REG_BASE,
.ignore = DRHD1_IGNORE,
.devices = drhd1_dev_scope
},
};
struct dmar_info plat_dmar_info = {
.drhd_count = DRHD_COUNT,
.drhd_units = drhd_info_array,
};
#ifdef CONFIG_RDT_ENABLED
struct platform_clos_info platform_l2_clos_array[MAX_PLATFORM_CLOS_NUM];
struct platform_clos_info platform_l3_clos_array[MAX_PLATFORM_CLOS_NUM];
struct platform_clos_info platform_mba_clos_array[MAX_PLATFORM_CLOS_NUM];
#endif
const struct cpu_state_table board_cpu_state_tbl;
const union pci_bdf plat_hidden_pdevs[MAX_HIDDEN_PDEVS_NUM];
const struct vmsix_on_msi_info vmsix_on_msi_devs[MAX_VMSIX_ON_MSI_PDEVS_NUM];

@@ -1,36 +0,0 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef MISC_CFG_H
#define MISC_CFG_H
#define MAX_PCPU_NUM 4U
#define MAX_PLATFORM_CLOS_NUM 0U
#define MAX_VMSIX_ON_MSI_PDEVS_NUM 0U
#define ROOTFS_0 "root=/dev/sda3 "
#define ROOTFS_1 "root=/dev/nvme0n1p3 "
#define SOS_ROOTFS ROOTFS_0
#define SOS_CONSOLE "console=ttyS0 "
#define SOS_COM1_BASE 0x3F8U
#define SOS_COM1_IRQ 4U
#define SOS_COM2_BASE 0x2F8U
#define SOS_COM2_IRQ 3U
#ifndef CONFIG_RELEASE
#define SOS_BOOTARGS_DIFF "hvlog=2M@0xE00000 memmap=0x200000$0xE00000 "
#else
#define SOS_BOOTARGS_DIFF ""
#endif
#define MAX_HIDDEN_PDEVS_NUM 0U
#define HI_MMIO_START ~0UL
#define HI_MMIO_END 0UL
#endif /* MISC_CFG_H */

@@ -1,26 +0,0 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef PCI_DEVICES_H_
#define PCI_DEVICES_H_
#define PTDEV_HI_MMIO_SIZE 0xe00000UL
#define SATA_CONTROLLER_0 .pbdf.bits = {.b = 0x00U, .d = 0x17U, .f = 0x00U}, \
.vbar_base[0] = PTDEV_HI_MMIO_START + 0x200000UL, \
.vbar_base[1] = PTDEV_HI_MMIO_START + 0x400000UL, \
.vbar_base[5] = PTDEV_HI_MMIO_START + 0x600000UL
#define USB_CONTROLLER_0 .pbdf.bits = {.b = 0x00U, .d = 0x14U, .f = 0x00U}, \
.vbar_base[0] = PTDEV_HI_MMIO_START + 0x800000UL
#define ETHERNET_CONTROLLER_0 .pbdf.bits = {.b = 0x00U, .d = 0x1fU, .f = 0x06U}, \
.vbar_base[0] = PTDEV_HI_MMIO_START + 0xa00000UL
#define NETWORK_CONTROLLER_0 .pbdf.bits = {.b = 0x01U, .d = 0x00U, .f = 0x00U}, \
.vbar_base[0] = PTDEV_HI_MMIO_START + 0xc00000UL
#endif /* PCI_DEVICES_H_ */

@@ -1,73 +0,0 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/* This is a template header file for nuc7i7dnb platform ACPI info definition
* works when Kconfig of ENFORCE_VALIDATED_ACPI_INFO is disabled.
* When ENFORCE_VALIDATED_ACPI_INFO is enabled, we should use
* ./misc/acrn-config/target/board_parser.py running on target
* to generate nuc7i7dnb specific acpi info file named as nuc7i7dnb_acpi_info.h
* and put it in hypervisor/arch/x86/configs/nuc7i7dnb/.
*/
#ifndef PLATFORM_ACPI_INFO_H
#define PLATFORM_ACPI_INFO_H
/*
* BIOS Information
* Vendor: Intel Corp.
* Version: DNKBLi7v.86A.0065.2019.0611.1424
* Release Date: 06/11/2019
* BIOS Revision: 5.6
*
* Base Board Information
* Manufacturer: Intel Corporation
* Product Name: NUC7i7DNB
* Version: J83500-204
*/
/* pm sstate data */
#define PM1A_EVT_ADDRESS 0x1800UL
#define PM1A_EVT_ACCESS_SIZE 0x2U
#define PM1A_CNT_ADDRESS 0x1804UL
#define WAKE_VECTOR_32 0x7FA22F8CUL
#define WAKE_VECTOR_64 0x7FA22F98UL
#define RESET_REGISTER_ADDRESS 0xCF9UL
#define RESET_REGISTER_SPACE_ID SPACE_SYSTEM_IO
#define RESET_REGISTER_VALUE 0x6U
/* PCI mmcfg base of MCFG */
#define DEFAULT_PCI_MMCFG_BASE 0xE0000000UL
/* DRHD of DMAR */
#define DRHD_COUNT 2U
#define DRHD0_DEV_CNT 0x1U
#define DRHD0_SEGMENT 0x0U
#define DRHD0_FLAGS 0x0U
#define DRHD0_REG_BASE 0xFED90000UL
#define DRHD0_IGNORE true
#define DRHD0_DEVSCOPE0_TYPE 0x1U
#define DRHD0_DEVSCOPE0_ID 0x0U
#define DRHD0_DEVSCOPE0_BUS 0x0U
#define DRHD0_DEVSCOPE0_PATH 0x10U
#define DRHD1_DEV_CNT 0x2U
#define DRHD1_SEGMENT 0x0U
#define DRHD1_FLAGS 0x1U
#define DRHD1_REG_BASE 0xFED91000UL
#define DRHD1_IGNORE false
#define DRHD1_DEVSCOPE0_TYPE 0x3U
#define DRHD1_DEVSCOPE0_ID 0x2U
#define DRHD1_DEVSCOPE0_BUS 0xf0U
#define DRHD1_DEVSCOPE0_PATH 0xf8U
#define DRHD1_DEVSCOPE1_TYPE 0x4U
#define DRHD1_DEVSCOPE1_ID 0x0U
#define DRHD1_DEVSCOPE1_BUS 0x0U
#define DRHD1_DEVSCOPE1_PATH 0xf8U
#endif /* PLATFORM_ACPI_INFO_H */

@@ -30,7 +30,7 @@ extern struct dmar_info plat_dmar_info;
#ifdef CONFIG_RDT_ENABLED
extern struct platform_clos_info platform_l2_clos_array[MAX_PLATFORM_CLOS_NUM];
extern struct platform_clos_info platform_l3_clos_array[MAX_PLATFORM_CLOS_NUM];
extern struct platform_clos_info platform_mba_clos_array[MAX_PLATFORM_CLOS_NUM];
extern struct platform_clos_info platform_mba_clos_array[MAX_MBA_CLOS_NUM_ENTRIES];
#endif
extern const struct cpu_state_table board_cpu_state_tbl;

@@ -7,7 +7,7 @@
#ifndef PAGE_H
#define PAGE_H
#include <pci_devices.h>
#include <board_info.h>
#define PAGE_SHIFT 12U
#define PAGE_SIZE (1U << PAGE_SHIFT)
@@ -31,14 +31,14 @@
* - Guest OS won't re-program device MMIO bars to the address not covered by
* this EPT_ADDRESS_SPACE.
*/
#define EPT_ADDRESS_SPACE(size) ((size > MEM_2G) ? \
#define EPT_ADDRESS_SPACE(size) (((size) > MEM_2G) ? \
((size) + PLATFORM_LO_MMIO_SIZE + PLATFORM_HI_MMIO_SIZE) \
: (MEM_2G + PLATFORM_LO_MMIO_SIZE + PLATFORM_HI_MMIO_SIZE))
#define PTDEV_HI_MMIO_START ((CONFIG_UOS_RAM_SIZE > MEM_2G) ? \
(CONFIG_UOS_RAM_SIZE + PLATFORM_LO_MMIO_SIZE) : (MEM_2G + PLATFORM_LO_MMIO_SIZE))
#define PRE_VM_EPT_ADDRESS_SPACE(size) (PTDEV_HI_MMIO_START + PTDEV_HI_MMIO_SIZE)
#define PRE_VM_EPT_ADDRESS_SPACE(size) (PTDEV_HI_MMIO_START + HI_MMIO_SIZE)
#define TOTAL_EPT_4K_PAGES_SIZE (PRE_VM_NUM*(PT_PAGE_NUM(PRE_VM_EPT_ADDRESS_SPACE(CONFIG_UOS_RAM_SIZE))*MEM_4K)) + \
(SOS_VM_NUM*(PT_PAGE_NUM(EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE))*MEM_4K)) + \

@@ -1,90 +0,0 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <vm_config.h>
#include <vuart.h>
#include <pci_dev.h>
struct acrn_vm_config vm_configs[CONFIG_MAX_VM_NUM] = {
{ /* VM0 */
CONFIG_SAFETY_VM(1),
.name = "ACRN PRE-LAUNCHED VM0",
.guest_flags = 0UL,
.cpu_affinity = VM0_CONFIG_CPU_AFFINITY,
.memory = {
.start_hpa = VM0_CONFIG_MEM_START_HPA,
.size = VM0_CONFIG_MEM_SIZE,
},
.os_config = {
.name = "Zephyr",
.kernel_type = KERNEL_ZEPHYR,
.kernel_mod_tag = "Zephyr_RawImage",
.bootargs = "",
.kernel_load_addr = 0x100000,
.kernel_entry_addr = 0x100000,
},
.vuart[0] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = COM1_BASE,
.irq = COM1_IRQ,
},
.vuart[1] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = COM2_BASE,
.irq = COM2_IRQ,
.t_vuart.vm_id = 1U,
.t_vuart.vuart_id = 1U,
},
#ifdef VM0_PASSTHROUGH_TPM
.pt_tpm2 = true,
.mmiodevs[0] = {
.base_gpa = VM0_TPM_BUFFER_BASE_ADDR,
.base_hpa = 0xFED40000UL,
.size = VM0_TPM_BUFFER_SIZE,
},
#endif
},
{ /* VM1 */
CONFIG_SOS_VM,
.name = "ACRN SOS VM",
.guest_flags = 0UL,
.memory = {
.start_hpa = 0UL,
.size = CONFIG_SOS_RAM_SIZE,
},
.os_config = {
.name = "ACRN Service OS",
.kernel_type = KERNEL_BZIMAGE,
.kernel_mod_tag = "Linux_bzImage",
.bootargs = SOS_VM_BOOTARGS,
},
.vuart[0] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = SOS_COM1_BASE,
.irq = SOS_COM1_IRQ,
},
.vuart[1] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = SOS_COM2_BASE,
.irq = SOS_COM2_IRQ,
.t_vuart.vm_id = 0U,
.t_vuart.vuart_id = 1U,
},
},
{ /* VM2 */
CONFIG_POST_STD_VM(1),
.cpu_affinity = VM2_CONFIG_CPU_AFFINITY,
.vuart[0] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = COM1_BASE,
.irq = COM1_IRQ,
},
.vuart[1] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = INVALID_COM_BASE,
}
}
};

@@ -1,44 +0,0 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef VM_CONFIGURATIONS_H
#define VM_CONFIGURATIONS_H
#include <misc_cfg.h>
/* Bits mask of guest flags that can be programmed by device model. Other bits are set by hypervisor only */
#define DM_OWNED_GUEST_FLAG_MASK (GUEST_FLAG_SECURE_WORLD_ENABLED | GUEST_FLAG_LAPIC_PASSTHROUGH | \
GUEST_FLAG_RT | GUEST_FLAG_IO_COMPLETION_POLLING)
/* SOS_VM_NUM can only be 0U or 1U;
* When SOS_VM_NUM is 0U, MAX_POST_VM_NUM must be 0U too;
* MAX_POST_VM_NUM must be bigger than CONFIG_MAX_KATA_VM_NUM;
*/
#define PRE_VM_NUM 1U
#define SOS_VM_NUM 1U
#define MAX_POST_VM_NUM 1U
#define CONFIG_MAX_KATA_VM_NUM 0U
#define VM0_CONFIG_CPU_AFFINITY (AFFINITY_CPU(3U))
#define VM0_CONFIG_MEM_START_HPA 0x100000000UL
#define VM0_CONFIG_MEM_SIZE 0x20000000UL
#define SOS_VM_BOOTARGS SOS_ROOTFS \
"rw rootwait " \
"console=tty0 " \
SOS_CONSOLE \
"consoleblank=0 " \
"no_timer_check " \
"quiet loglevel=3 " \
"i915.nuclear_pageflip=1 " \
"i915.avail_planes_per_pipe=0x010700 " \
"i915.domain_plane_owners=0x011100001111 " \
"i915.enable_gvt=1 " \
SOS_IDLE \
SOS_BOOTARGS_DIFF
#define VM2_CONFIG_CPU_AFFINITY (AFFINITY_CPU(2U))
#endif /* VM_CONFIGURATIONS_H */

@@ -1,135 +0,0 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <vm_config.h>
#include <vuart.h>
#include <pci_dev.h>
struct acrn_vm_config vm_configs[CONFIG_MAX_VM_NUM] = {
{ /* VM0 */
CONFIG_SOS_VM,
.name = "ACRN SOS VM",
.guest_flags = 0UL,
.clos = { 0U },
.memory = {
.start_hpa = 0UL,
.size = CONFIG_SOS_RAM_SIZE,
},
.os_config = {
.name = "ACRN Service OS",
.kernel_type = KERNEL_BZIMAGE,
.kernel_mod_tag = "Linux_bzImage",
.bootargs = SOS_VM_BOOTARGS
},
.vuart[0] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = SOS_COM1_BASE,
.irq = SOS_COM1_IRQ,
},
.vuart[1] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = SOS_COM2_BASE,
.irq = SOS_COM2_IRQ,
.t_vuart.vm_id = 2U,
.t_vuart.vuart_id = 1U,
},
},
{ /* VM1 */
CONFIG_POST_STD_VM(1),
.cpu_affinity = VM1_CONFIG_CPU_AFFINITY,
.vuart[0] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = COM1_BASE,
.irq = COM1_IRQ,
},
.vuart[1] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = INVALID_COM_BASE,
}
},
{ /* VM2 */
CONFIG_POST_RT_VM(1),
.guest_flags = 0UL,
.cpu_affinity = VM2_CONFIG_CPU_AFFINITY,
.vuart[0] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = COM1_BASE,
.irq = COM1_IRQ,
},
.vuart[1] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = COM2_BASE,
.irq = COM2_IRQ,
.t_vuart.vm_id = 0U,
.t_vuart.vuart_id = 1U,
},
},
{ /* VM3 */
CONFIG_POST_STD_VM(2),
.cpu_affinity = VM3_CONFIG_CPU_AFFINITY,
.vuart[0] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = COM1_BASE,
.irq = COM1_IRQ,
},
.vuart[1] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = INVALID_COM_BASE,
}
},
{ /* VM4 */
CONFIG_POST_STD_VM(3),
.cpu_affinity = VM4_CONFIG_CPU_AFFINITY,
.vuart[0] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = COM1_BASE,
.irq = COM1_IRQ,
},
.vuart[1] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = INVALID_COM_BASE,
}
},
{ /* VM5 */
CONFIG_POST_STD_VM(4),
.cpu_affinity = VM5_CONFIG_CPU_AFFINITY,
.vuart[0] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = COM1_BASE,
.irq = COM1_IRQ,
},
.vuart[1] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = INVALID_COM_BASE,
}
},
{ /* VM6 */
CONFIG_POST_STD_VM(5),
.cpu_affinity = VM6_CONFIG_CPU_AFFINITY,
.vuart[0] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = COM1_BASE,
.irq = COM1_IRQ,
},
.vuart[1] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = INVALID_COM_BASE,
}
},
{ /* VM7 */
CONFIG_KATA_VM(1),
.cpu_affinity = VM7_CONFIG_CPU_AFFINITY,
.vuart[0] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = COM1_BASE,
.irq = COM1_IRQ,
},
.vuart[1] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = INVALID_COM_BASE,
}
},
};

@@ -1,47 +0,0 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef VM_CONFIGURATIONS_H
#define VM_CONFIGURATIONS_H
#include <misc_cfg.h>
/* SOS_VM_NUM can only be 0U or 1U;
* When SOS_VM_NUM is 0U, MAX_POST_VM_NUM must be 0U too;
* MAX_POST_VM_NUM must be bigger than CONFIG_MAX_KATA_VM_NUM;
*/
#define PRE_VM_NUM 0U
#define SOS_VM_NUM 1U
#define MAX_POST_VM_NUM 7U
#define CONFIG_MAX_KATA_VM_NUM 1U
/* Bits mask of guest flags that can be programmed by device model. Other bits are set by hypervisor only */
#define DM_OWNED_GUEST_FLAG_MASK (GUEST_FLAG_SECURE_WORLD_ENABLED | GUEST_FLAG_LAPIC_PASSTHROUGH | \
GUEST_FLAG_RT | GUEST_FLAG_IO_COMPLETION_POLLING)
#define SOS_VM_BOOTARGS SOS_ROOTFS \
"rw rootwait " \
"console=tty0 " \
SOS_CONSOLE \
"consoleblank=0 " \
"no_timer_check " \
"quiet loglevel=3 " \
"i915.nuclear_pageflip=1 " \
"i915.avail_planes_per_pipe=0x01010F " \
"i915.domain_plane_owners=0x011111110000 " \
"i915.enable_gvt=1 " \
SOS_IDLE \
SOS_BOOTARGS_DIFF
#define VM1_CONFIG_CPU_AFFINITY (AFFINITY_CPU(0U) | AFFINITY_CPU(1U))
#define VM2_CONFIG_CPU_AFFINITY (AFFINITY_CPU(2U) | AFFINITY_CPU(3U))
#define VM3_CONFIG_CPU_AFFINITY (AFFINITY_CPU(0U) | AFFINITY_CPU(1U))
#define VM4_CONFIG_CPU_AFFINITY (AFFINITY_CPU(0U) | AFFINITY_CPU(1U))
#define VM5_CONFIG_CPU_AFFINITY (AFFINITY_CPU(0U) | AFFINITY_CPU(1U))
#define VM6_CONFIG_CPU_AFFINITY (AFFINITY_CPU(0U) | AFFINITY_CPU(1U))
#define VM7_CONFIG_CPU_AFFINITY (AFFINITY_CPU(0U) | AFFINITY_CPU(1U))
#endif /* VM_CONFIGURATIONS_H */

@@ -1,54 +0,0 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <vm_config.h>
#include <pci_devices.h>
#include <vpci.h>
#include <mmu.h>
#include <page.h>
/* The vbar_base info of pt devices is included in device MACROs which defined in
* arch/x86/configs/$(CONFIG_BOARD)/pci_devices.h.
* The memory range of vBAR should exactly match with the e820 layout of VM.
*/
struct acrn_vm_pci_dev_config vm0_pci_devs[VM0_CONFIG_PCI_DEV_NUM] = {
{
.emu_type = PCI_DEV_TYPE_HVEMUL,
.vbdf.bits = {.b = 0x00U, .d = 0x00U, .f = 0x00U},
.vdev_ops = &vhostbridge_ops,
},
{
.emu_type = PCI_DEV_TYPE_PTDEV,
.vbdf.bits = {.b = 0x00U, .d = 0x01U, .f = 0x00U},
VM0_STORAGE_CONTROLLER
},
{
.emu_type = PCI_DEV_TYPE_PTDEV,
.vbdf.bits = {.b = 0x00U, .d = 0x02U, .f = 0x00U},
VM0_NETWORK_CONTROLLER
},
};
struct acrn_vm_pci_dev_config vm1_pci_devs[VM1_CONFIG_PCI_DEV_NUM] = {
{
.emu_type = PCI_DEV_TYPE_HVEMUL,
.vbdf.bits = {.b = 0x00U, .d = 0x00U, .f = 0x00U},
.vdev_ops = &vhostbridge_ops,
},
{
.emu_type = PCI_DEV_TYPE_PTDEV,
.vbdf.bits = {.b = 0x00U, .d = 0x01U, .f = 0x00U},
VM1_STORAGE_CONTROLLER
},
#if defined(VM1_NETWORK_CONTROLLER)
{
.emu_type = PCI_DEV_TYPE_PTDEV,
.vbdf.bits = {.b = 0x00U, .d = 0x02U, .f = 0x00U},
VM1_NETWORK_CONTROLLER
},
#endif
};

@@ -1,83 +0,0 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <vm_config.h>
#include <vuart.h>
extern struct acrn_vm_pci_dev_config vm0_pci_devs[VM0_CONFIG_PCI_DEV_NUM];
extern struct acrn_vm_pci_dev_config vm1_pci_devs[VM1_CONFIG_PCI_DEV_NUM];
struct acrn_vm_config vm_configs[CONFIG_MAX_VM_NUM] = {
{ /* VM0 */
CONFIG_PRE_STD_VM(1),
.name = "ACRN PRE-LAUNCHED VM0",
.cpu_affinity = VM0_CONFIG_CPU_AFFINITY,
.memory = {
.start_hpa = VM0_CONFIG_MEM_START_HPA,
.size = VM0_CONFIG_MEM_SIZE,
},
.os_config = {
.name = "ClearLinux",
.kernel_type = KERNEL_BZIMAGE,
.kernel_mod_tag = "Linux_bzImage",
.bootargs = VM0_CONFIG_OS_BOOTARG_CONSOLE \
VM0_CONFIG_OS_BOOTARG_MAXCPUS \
VM0_CONFIG_OS_BOOTARG_ROOT \
"rw rootwait noxsave nohpet \
no_timer_check ignore_loglevel log_buf_len=16M \
consoleblank=0 tsc=reliable"
},
.vuart[0] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = COM1_BASE,
.irq = COM1_IRQ,
},
.vuart[1] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = COM2_BASE,
.irq = COM2_IRQ,
.t_vuart.vm_id = 1U,
.t_vuart.vuart_id = 1U,
},
.pci_dev_num = VM0_CONFIG_PCI_DEV_NUM,
.pci_devs = vm0_pci_devs,
},
{ /* VM1 */
CONFIG_PRE_STD_VM(2),
.name = "ACRN PRE-LAUNCHED VM1",
.cpu_affinity = VM1_CONFIG_CPU_AFFINITY,
.guest_flags = (GUEST_FLAG_RT | GUEST_FLAG_LAPIC_PASSTHROUGH),
.memory = {
.start_hpa = VM1_CONFIG_MEM_START_HPA,
.size = VM1_CONFIG_MEM_SIZE,
},
.os_config = {
.name = "ClearLinux",
.kernel_type = KERNEL_BZIMAGE,
.kernel_mod_tag = "Linux_bzImage",
.bootargs = VM1_CONFIG_OS_BOOTARG_CONSOLE \
VM1_CONFIG_OS_BOOTARG_MAXCPUS \
VM1_CONFIG_OS_BOOTARG_ROOT \
"rw rootwait noxsave nohpet \
no_timer_check ignore_loglevel log_buf_len=16M \
consoleblank=0 tsc=reliable"
},
.vuart[0] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = COM1_BASE,
.irq = COM1_IRQ,
},
.vuart[1] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = COM2_BASE,
.irq = COM2_IRQ,
.t_vuart.vm_id = 0U,
.t_vuart.vuart_id = 1U,
},
.pci_dev_num = VM1_CONFIG_PCI_DEV_NUM,
.pci_devs = vm1_pci_devs,
},
};

@@ -1,73 +0,0 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef VM_CONFIGURATIONS_H
#define VM_CONFIGURATIONS_H
#include <pci_devices.h>
#include <misc_cfg.h>
/* Bits mask of guest flags that can be programmed by device model. Other bits are set by hypervisor only */
#define DM_OWNED_GUEST_FLAG_MASK 0UL
/* SOS_VM_NUM can only be 0U or 1U;
* When SOS_VM_NUM is 0U, MAX_POST_VM_NUM must be 0U too;
* MAX_POST_VM_NUM must be bigger than CONFIG_MAX_KATA_VM_NUM;
*/
#define PRE_VM_NUM 2U
#define SOS_VM_NUM 0U
#define MAX_POST_VM_NUM 0U
#define CONFIG_MAX_KATA_VM_NUM 0U
/* The VM CONFIGs like:
* VMX_CONFIG_CPU_AFFINITY
* VMX_CONFIG_MEM_START_HPA
* VMX_CONFIG_MEM_SIZE
* VMX_CONFIG_OS_BOOTARG_ROOT
* VMX_CONFIG_OS_BOOTARG_MAX_CPUS
* VMX_CONFIG_OS_BOOTARG_CONSOLE
* might be different on your board, please modify them per your needs.
*/
#define VM0_CONFIG_CPU_AFFINITY (AFFINITY_CPU(0U) | AFFINITY_CPU(2U))
#define VM0_CONFIG_MEM_START_HPA 0x100000000UL
#define VM0_CONFIG_MEM_SIZE 0x20000000UL
#define VM0_CONFIG_OS_BOOTARG_ROOT ROOTFS_0
#define VM0_CONFIG_OS_BOOTARG_MAXCPUS "maxcpus=2 "
#define VM0_CONFIG_OS_BOOTARG_CONSOLE "console=ttyS0 "
#define VM1_CONFIG_CPU_AFFINITY (AFFINITY_CPU(1U) | AFFINITY_CPU(3U))
#define VM1_CONFIG_MEM_START_HPA 0x120000000UL
#define VM1_CONFIG_MEM_SIZE 0x20000000UL
#define VM1_CONFIG_OS_BOOTARG_ROOT ROOTFS_0
#define VM1_CONFIG_OS_BOOTARG_MAXCPUS "maxcpus=2 "
#define VM1_CONFIG_OS_BOOTARG_CONSOLE "console=ttyS0 "
/* VM pass-through devices assign policy:
* VM0: one Mass Storage controller, one Network controller;
* VM1: one Mass Storage controller, one Network controller(if a secondary Network controller class device exist);
*/
#define VM0_STORAGE_CONTROLLER SATA_CONTROLLER_0
#define VM0_NETWORK_CONTROLLER ETHERNET_CONTROLLER_0
#define VM0_CONFIG_PCI_DEV_NUM 3U
#define VM1_STORAGE_CONTROLLER USB_CONTROLLER_0
#if defined(ETHERNET_CONTROLLER_1)
/* if a secondary Ethernet controller subclass exist, assign to VM1 */
#define VM1_NETWORK_CONTROLLER ETHERNET_CONTROLLER_1
#elif defined(NETWORK_CONTROLLER_0)
/* if a Network controller subclass exist(usually it is a wireless network card), assign to VM1 */
#define VM1_NETWORK_CONTROLLER NETWORK_CONTROLLER_0
#endif
#if defined(VM1_NETWORK_CONTROLLER)
#define VM1_CONFIG_PCI_DEV_NUM 3U
#else
/* no network controller could be assigned to VM1 */
#define VM1_CONFIG_PCI_DEV_NUM 2U
#endif
#endif /* VM_CONFIGURATIONS_H */

@@ -1,65 +0,0 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <vm_config.h>
#include <vuart.h>
#include <pci_dev.h>
struct acrn_vm_config vm_configs[CONFIG_MAX_VM_NUM] = {
{ /* VM0 */
CONFIG_SOS_VM,
.name = "ACRN SOS VM",
/* Allow SOS to reboot the host since there is supposed to be the highest severity guest */
.guest_flags = 0UL,
.memory = {
.start_hpa = 0UL,
.size = CONFIG_SOS_RAM_SIZE,
},
.os_config = {
.name = "ACRN Service OS",
.kernel_type = KERNEL_BZIMAGE,
.kernel_mod_tag = "Linux_bzImage",
.bootargs = SOS_VM_BOOTARGS,
},
.vuart[0] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = SOS_COM1_BASE,
.irq = SOS_COM1_IRQ,
},
.vuart[1] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = INVALID_COM_BASE,
},
},
{ /* VM1 */
CONFIG_POST_STD_VM(1),
.cpu_affinity = VM1_CONFIG_CPU_AFFINITY,
.vuart[0] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = INVALID_COM_BASE,
},
.vuart[1] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = INVALID_COM_BASE,
}
},
#if CONFIG_MAX_KATA_VM_NUM > 0
{ /* VM2 */
CONFIG_KATA_VM(1),
.cpu_affinity = VM2_CONFIG_CPU_AFFINITY,
.vuart[0] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = INVALID_COM_BASE,
},
.vuart[1] = {
.type = VUART_LEGACY_PIO,
.addr.port_base = INVALID_COM_BASE,
}
},
#endif
};

@@ -1,45 +0,0 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef VM_CONFIGURATIONS_H
#define VM_CONFIGURATIONS_H
#include <misc_cfg.h>
/* SOS_VM_NUM can only be 0U or 1U;
* When SOS_VM_NUM is 0U, MAX_POST_VM_NUM must be 0U too;
* MAX_POST_VM_NUM must be bigger than CONFIG_MAX_KATA_VM_NUM;
*/
#define PRE_VM_NUM 0U
#define SOS_VM_NUM 1U
#define MAX_POST_VM_NUM 2U /* including 1 KATA VM */
#define CONFIG_MAX_KATA_VM_NUM 1U
/* Bits mask of guest flags that can be programmed by device model. Other bits are set by hypervisor only */
#define DM_OWNED_GUEST_FLAG_MASK (GUEST_FLAG_SECURE_WORLD_ENABLED | GUEST_FLAG_LAPIC_PASSTHROUGH | \
GUEST_FLAG_RT | GUEST_FLAG_IO_COMPLETION_POLLING)
#define SOS_VM_BOOTARGS SOS_ROOTFS \
"rw rootwait " \
"console=tty0 " \
SOS_CONSOLE \
"consoleblank=0 " \
"no_timer_check " \
"quiet loglevel=3 " \
"i915.nuclear_pageflip=1 " \
"i915.avail_planes_per_pipe=0x01010F " \
"i915.domain_plane_owners=0x011111110000 " \
"i915.enable_gvt=1 " \
SOS_IDLE \
SOS_BOOTARGS_DIFF
#if CONFIG_MAX_KATA_VM_NUM > 0
#define VM1_CONFIG_CPU_AFFINITY (AFFINITY_CPU(1U) | AFFINITY_CPU(2U))
#define VM2_CONFIG_CPU_AFFINITY (AFFINITY_CPU(3U))
#else
#define VM1_CONFIG_CPU_AFFINITY (AFFINITY_CPU(1U) | AFFINITY_CPU(2U) | AFFINITY_CPU(3U))
#endif
#endif /* VM_CONFIGURATIONS_H */