HV: refine launch vm interface

When a pcpu enters guest mode, it calls the launch_vms() function to launch
the VMs defined in the global vm_configs array.

In the launch_vms() function, the current pcpu goes through the vm_config
array and checks whether it is the BSP of a configured VM; if so, it
prepares the corresponding VM and starts it. The index of the vm_config
array entry is used as the VM id.

The least significant set bit in pcpu_bitmap indicates the BSP of the VM.

Tracked-On: #2291

Signed-off-by: Victor Sun <victor.sun@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Victor Sun 2019-01-21 15:17:23 +08:00 committed by Eddie Dong
parent 49e6deaf26
commit e6117e0d5b
6 changed files with 236 additions and 249 deletions

View File

@ -17,29 +17,34 @@ vm_sw_loader_t vm_sw_loader;
static struct acrn_vm vm_array[CONFIG_MAX_VM_NUM] __aligned(PAGE_SIZE); static struct acrn_vm vm_array[CONFIG_MAX_VM_NUM] __aligned(PAGE_SIZE);
static uint64_t vmid_bitmap; static struct acrn_vm *sos_vm_ptr = NULL;
static inline uint16_t alloc_vm_id(void) uint16_t find_free_vm_id(void)
{ {
uint16_t id = ffz64(vmid_bitmap); uint16_t id;
struct acrn_vm_config *vm_config;
while (id < CONFIG_MAX_VM_NUM) { for (id = 0U; id < CONFIG_MAX_VM_NUM; id++) {
if (!bitmap_test_and_set_lock(id, &vmid_bitmap)) { vm_config = get_vm_config(id);
if (vm_config->type == UNDEFINED_VM) {
break; break;
} }
id = ffz64(vmid_bitmap);
} }
return (id < CONFIG_MAX_VM_NUM) ? id : INVALID_VM_ID; return (vm_config->type == UNDEFINED_VM) ? id : INVALID_VM_ID;
} }
static inline void free_vm_id(const struct acrn_vm *vm) static inline void free_vm_id(const struct acrn_vm *vm)
{ {
bitmap_clear_lock(vm->vm_id, &vmid_bitmap); struct acrn_vm_config *vm_config = get_vm_config(vm->vm_id);
if (vm_config != NULL) {
vm_config->type = UNDEFINED_VM;
}
} }
static inline bool is_vm_valid(uint16_t vm_id) bool is_sos_vm(const struct acrn_vm *vm)
{ {
return bitmap_test(vm_id, &vmid_bitmap); return (vm != NULL) && (vm == sos_vm_ptr);
} }
/** /**
@ -57,159 +62,152 @@ static void setup_io_bitmap(struct acrn_vm *vm)
} }
} }
/* return a pointer to the virtual machine structure associated with /**
* return a pointer to the virtual machine structure associated with
* this VM ID * this VM ID
*
* @pre vm_id < CONFIG_MAX_VM_NUM
*/ */
struct acrn_vm *get_vm_from_vmid(uint16_t vm_id) struct acrn_vm *get_vm_from_vmid(uint16_t vm_id)
{ {
struct acrn_vm *ret; return &vm_array[vm_id];
if (is_vm_valid(vm_id)) {
ret = &vm_array[vm_id];
} else {
ret = NULL;
}
return ret;
} }
/** /**
* @pre vm_config != NULL && rtn_vm != NULL * @pre vm_config != NULL
*/ */
int32_t create_vm(struct acrn_vm_config *vm_config, struct acrn_vm **rtn_vm) static inline uint16_t get_vm_bsp_pcpu_id(const struct acrn_vm_config *vm_config)
{
uint16_t cpu_id = INVALID_CPU_ID;
cpu_id = ffs64(vm_config->pcpu_bitmap);
return (cpu_id < get_pcpu_nums()) ? cpu_id : INVALID_CPU_ID;
}
/**
* @pre vm_id < CONFIG_MAX_VM_NUM && vm_config != NULL && rtn_vm != NULL
*/
int32_t create_vm(uint16_t vm_id, struct acrn_vm_config *vm_config, struct acrn_vm **rtn_vm)
{ {
struct acrn_vm *vm = NULL; struct acrn_vm *vm = NULL;
int32_t status = 0; int32_t status = 0;
uint16_t vm_id;
bool need_cleanup = false; bool need_cleanup = false;
/* Allocate memory for virtual machine */
vm = &vm_array[vm_id];
(void)memset((void *)vm, 0U, sizeof(struct acrn_vm));
vm->vm_id = vm_id;
#ifdef CONFIG_PARTITION_MODE #ifdef CONFIG_PARTITION_MODE
vm_id = vm_config->vm_id; /* Map Virtual Machine to its VM Description */
bitmap_set_lock(vm_id, &vmid_bitmap); vm->vm_config = vm_config;
#else
vm_id = alloc_vm_id();
#endif #endif
vm->hw.created_vcpus = 0U;
vm->emul_mmio_regions = 0U;
vm->snoopy_mem = true;
if (vm_id < CONFIG_MAX_VM_NUM) { /* gpa_lowtop are used for system start up */
/* Allocate memory for virtual machine */ vm->hw.gpa_lowtop = 0UL;
vm = &vm_array[vm_id];
(void)memset((void *)vm, 0U, sizeof(struct acrn_vm));
vm->vm_id = vm_id;
#ifdef CONFIG_PARTITION_MODE
/* Map Virtual Machine to its VM Description */
vm->vm_config = vm_config;
#endif
vm->hw.created_vcpus = 0U;
vm->emul_mmio_regions = 0U;
vm->snoopy_mem = true;
/* gpa_lowtop are used for system start up */ init_ept_mem_ops(vm);
vm->hw.gpa_lowtop = 0UL; vm->arch_vm.nworld_eptp = vm->arch_vm.ept_mem_ops.get_pml4_page(vm->arch_vm.ept_mem_ops.info);
sanitize_pte((uint64_t *)vm->arch_vm.nworld_eptp);
init_ept_mem_ops(vm); /* Only for SOS: Configure VM software information */
vm->arch_vm.nworld_eptp = vm->arch_vm.ept_mem_ops.get_pml4_page(vm->arch_vm.ept_mem_ops.info); /* For UOS: This VM software information is configure in DM */
sanitize_pte((uint64_t *)vm->arch_vm.nworld_eptp); if (is_sos_vm(vm)) {
vm->snoopy_mem = false;
/* Only for SOS: Configure VM software information */ rebuild_sos_vm_e820();
/* For UOS: This VM software information is configure in DM */ prepare_sos_vm_memmap(vm);
if (is_sos_vm(vm)) {
vm->snoopy_mem = false;
rebuild_sos_vm_e820();
prepare_sos_vm_memmap(vm);
#ifndef CONFIG_EFI_STUB #ifndef CONFIG_EFI_STUB
status = init_vm_boot_info(vm); status = init_vm_boot_info(vm);
#else #else
status = efi_boot_init(); status = efi_boot_init();
#endif #endif
if (status == 0) {
init_iommu_sos_vm_domain(vm);
} else {
need_cleanup = true;
}
} else {
/* populate UOS vm fields according to vm_config */
vm->sworld_control.flag.supported = vm_config->sworld_supported;
if (vm->sworld_control.flag.supported != 0UL) {
struct memory_ops *ept_mem_ops = &vm->arch_vm.ept_mem_ops;
ept_mr_add(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
hva2hpa(ept_mem_ops->get_sworld_memory_base(ept_mem_ops->info)),
TRUSTY_EPT_REBASE_GPA, TRUSTY_RAM_SIZE, EPT_WB | EPT_RWX);
}
(void)memcpy_s(&vm->GUID[0], sizeof(vm->GUID),
&vm_config->GUID[0], sizeof(vm_config->GUID));
#ifdef CONFIG_PARTITION_MODE
ept_mr_add(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
vm_config->start_hpa, 0UL, vm_config->mem_size,
EPT_RWX|EPT_WB);
init_vm_boot_info(vm);
#endif
}
if (status == 0) { if (status == 0) {
enable_iommu(); init_iommu_sos_vm_domain(vm);
} else {
INIT_LIST_HEAD(&vm->softirq_dev_entry_list); need_cleanup = true;
spinlock_init(&vm->softirq_dev_lock);
vm->intr_inject_delay_delta = 0UL;
/* Set up IO bit-mask such that VM exit occurs on
* selected IO ranges
*/
setup_io_bitmap(vm);
vm_setup_cpu_state(vm);
if (is_sos_vm(vm)) {
/* Load pm S state data */
if (vm_load_pm_s_state(vm) == 0) {
register_pm1ab_handler(vm);
}
/* Create virtual uart; just when uart enabled, vuart can work */
if (is_dbg_uart_enabled()) {
vuart_init(vm);
}
}
vpic_init(vm);
#ifdef CONFIG_PARTITION_MODE
/* Create virtual uart; just when uart enabled, vuart can work */
if (vm_config->vm_vuart && is_dbg_uart_enabled()) {
vuart_init(vm);
}
vrtc_init(vm);
#endif
vpci_init(vm);
/* vpic wire_mode default is INTR */
vm->wire_mode = VPIC_WIRE_INTR;
/* Init full emulated vIOAPIC instance */
vioapic_init(vm);
/* Populate return VM handle */
*rtn_vm = vm;
vm->sw.io_shared_page = NULL;
#ifdef CONFIG_IOREQ_POLLING
/* Now, enable IO completion polling mode for all VMs with CONFIG_IOREQ_POLLING. */
vm->sw.is_completion_polling = true;
#endif
status = set_vcpuid_entries(vm);
if (status == 0) {
vm->state = VM_CREATED;
} else {
need_cleanup = true;
}
} }
} else { } else {
pr_err("%s, vm id is invalid!\n", __func__); /* populate UOS vm fields according to vm_config */
status = -ENODEV; vm->sworld_control.flag.supported = vm_config->sworld_supported;
if (vm->sworld_control.flag.supported != 0UL) {
struct memory_ops *ept_mem_ops = &vm->arch_vm.ept_mem_ops;
ept_mr_add(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
hva2hpa(ept_mem_ops->get_sworld_memory_base(ept_mem_ops->info)),
TRUSTY_EPT_REBASE_GPA, TRUSTY_RAM_SIZE, EPT_WB | EPT_RWX);
}
(void)memcpy_s(&vm->GUID[0], sizeof(vm->GUID),
&vm_config->GUID[0], sizeof(vm_config->GUID));
#ifdef CONFIG_PARTITION_MODE
ept_mr_add(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
vm_config->start_hpa, 0UL, vm_config->mem_size,
EPT_RWX|EPT_WB);
init_vm_boot_info(vm);
#endif
}
if (status == 0) {
enable_iommu();
INIT_LIST_HEAD(&vm->softirq_dev_entry_list);
spinlock_init(&vm->softirq_dev_lock);
vm->intr_inject_delay_delta = 0UL;
/* Set up IO bit-mask such that VM exit occurs on
* selected IO ranges
*/
setup_io_bitmap(vm);
vm_setup_cpu_state(vm);
if (is_sos_vm(vm)) {
/* Load pm S state data */
if (vm_load_pm_s_state(vm) == 0) {
register_pm1ab_handler(vm);
}
/* Create virtual uart; just when uart enabled, vuart can work */
if (is_dbg_uart_enabled()) {
vuart_init(vm);
}
}
vpic_init(vm);
#ifdef CONFIG_PARTITION_MODE
/* Create virtual uart; just when uart enabled, vuart can work */
if (vm_config->vm_vuart && is_dbg_uart_enabled()) {
vuart_init(vm);
}
vrtc_init(vm);
#endif
vpci_init(vm);
/* vpic wire_mode default is INTR */
vm->wire_mode = VPIC_WIRE_INTR;
/* Init full emulated vIOAPIC instance */
vioapic_init(vm);
/* Populate return VM handle */
*rtn_vm = vm;
vm->sw.io_shared_page = NULL;
#ifdef CONFIG_IOREQ_POLLING
/* Now, enable IO completion polling mode for all VMs with CONFIG_IOREQ_POLLING. */
vm->sw.is_completion_polling = true;
#endif
status = set_vcpuid_entries(vm);
if (status == 0) {
vm->state = VM_CREATED;
} else {
need_cleanup = true;
}
} }
if (need_cleanup && (vm != NULL)) { if (need_cleanup && (vm != NULL)) {
@ -369,23 +367,21 @@ void resume_vm_from_s3(struct acrn_vm *vm, uint32_t wakeup_vec)
schedule_vcpu(bsp); schedule_vcpu(bsp);
} }
#ifdef CONFIG_PARTITION_MODE /**
/* Create vm/vcpu for vm */ * Prepare to create vm/vcpu for vm
int32_t prepare_vm(uint16_t pcpu_id) *
* @pre vm_id < CONFIG_MAX_VM_NUM && vm_config != NULL
*/
void prepare_vm(uint16_t vm_id, struct acrn_vm_config *vm_config)
{ {
int32_t ret = 0; int32_t err = 0;
uint16_t i; uint16_t i;
struct acrn_vm *vm = NULL; struct acrn_vm *vm = NULL;
struct acrn_vm_config *vm_config = NULL;
bool is_vm_bsp;
vm_config = pcpu_vm_config_map[pcpu_id].vm_config_ptr; err = create_vm(vm_id, vm_config, &vm);
is_vm_bsp = pcpu_vm_config_map[pcpu_id].is_bsp;
if (is_vm_bsp) {
ret = create_vm(vm_config, &vm);
ASSERT(ret == 0, "VM creation failed!");
if (err == 0) {
#ifdef CONFIG_PARTITION_MODE
mptable_build(vm); mptable_build(vm);
prepare_vcpu(vm, vm_config->vm_pcpu_ids[0]); prepare_vcpu(vm, vm_config->vm_pcpu_ids[0]);
@ -393,7 +389,19 @@ int32_t prepare_vm(uint16_t pcpu_id)
/* Prepare the AP for vm */ /* Prepare the AP for vm */
for (i = 1U; i < vm_config->vm_hw_num_cores; i++) for (i = 1U; i < vm_config->vm_hw_num_cores; i++)
prepare_vcpu(vm, vm_config->vm_pcpu_ids[i]); prepare_vcpu(vm, vm_config->vm_pcpu_ids[i]);
#else
for (i = 0U; i < get_pcpu_nums(); i++) {
if (bitmap_test(i, &vm_config->pcpu_bitmap)) {
err = prepare_vcpu(vm, i);
if (err != 0) {
break;
}
}
}
#endif
}
if (err == 0) {
if (vm_sw_loader == NULL) { if (vm_sw_loader == NULL) {
vm_sw_loader = general_sw_loader; vm_sw_loader = general_sw_loader;
} }
@ -403,65 +411,33 @@ int32_t prepare_vm(uint16_t pcpu_id)
/* start vm BSP automatically */ /* start vm BSP automatically */
start_vm(vm); start_vm(vm);
pr_acrnlog("Start VM%x", vm_config->vm_id); pr_acrnlog("Start VM%x", vm->vm_id);
} }
return ret;
} }
/**
* @pre vm_config != NULL
*/
void launch_vms(uint16_t pcpu_id)
{
uint16_t vm_id, bsp_id;
struct acrn_vm_config *vm_config;
for (vm_id = 0U; vm_id < CONFIG_MAX_VM_NUM; vm_id++) {
vm_config = get_vm_config(vm_id);
if ((vm_config->type == SOS_VM) || (vm_config->type == PRE_LAUNCHED_VM)) {
if (vm_config->type == SOS_VM) {
sos_vm_ptr = &vm_array[vm_id];
}
#ifdef CONFIG_PARTITION_MODE
bsp_id = vm_config->vm_pcpu_ids[0];
#else #else
bsp_id = get_vm_bsp_pcpu_id(vm_config);
/* Create vm/vcpu for sos_vm */
static int32_t prepare_sos_vm(void)
{
int32_t err;
uint16_t i;
struct acrn_vm *vm = NULL;
struct acrn_vm_config sos_vm_config;
(void)memset((void *)&sos_vm_config, 0U, sizeof(sos_vm_config));
sos_vm_config.vm_hw_num_cores = get_pcpu_nums();
err = create_vm(&sos_vm_config, &vm);
if (err == 0) {
/* Allocate all cpus to sos_vm at the beginning */
for (i = 0U; i < sos_vm_config.vm_hw_num_cores; i++) {
err = prepare_vcpu(vm, i);
if (err != 0) {
break;
}
}
if (err == 0) {
if (vm_sw_loader == NULL) {
vm_sw_loader = general_sw_loader;
}
if (is_sos_vm(vm)) {
(void)vm_sw_loader(vm);
}
/* start sos_vm BSP automatically */
start_vm(vm);
pr_acrnlog("Start SOS_VM");
}
}
return err;
}
int32_t prepare_vm(uint16_t pcpu_id)
{
int32_t err = 0;
/* prepare sos_vm if pcpu_id is BOOT_CPU_ID */
if (pcpu_id == BOOT_CPU_ID) {
err = prepare_sos_vm();
}
return err;
}
#endif #endif
if (pcpu_id == bsp_id) {
prepare_vm(vm_id, vm_config);
}
}
}
}

View File

@ -55,13 +55,7 @@ static void enter_guest_mode(uint16_t pcpu_id)
{ {
vmx_on(); vmx_on();
#ifdef CONFIG_PARTITION_MODE (void)launch_vms(pcpu_id);
(void)prepare_vm(pcpu_id);
#else
if (pcpu_id == BOOT_CPU_ID) {
(void)prepare_vm(pcpu_id);
}
#endif
switch_to_idle(default_idle); switch_to_idle(default_idle);

View File

@ -104,35 +104,44 @@ int32_t hcall_get_api_version(struct acrn_vm *vm, uint64_t param)
* @param param guest physical memory address. This gpa points to * @param param guest physical memory address. This gpa points to
* struct acrn_create_vm * struct acrn_create_vm
* *
* @pre Pointer vm shall point to SOS_VM * @pre Pointer vm shall point to SOS_VM, vm_config != NULL
* @return 0 on success, non-zero on error. * @return 0 on success, non-zero on error.
*/ */
int32_t hcall_create_vm(struct acrn_vm *vm, uint64_t param) int32_t hcall_create_vm(struct acrn_vm *vm, uint64_t param)
{ {
int32_t ret; uint16_t vm_id;
int32_t ret = -1;
struct acrn_vm *target_vm = NULL; struct acrn_vm *target_vm = NULL;
struct acrn_create_vm cv; struct acrn_create_vm cv;
struct acrn_vm_config vm_config; struct acrn_vm_config* vm_config = NULL;
(void)memset((void *)&cv, 0U, sizeof(cv)); (void)memset((void *)&cv, 0U, sizeof(cv));
if (copy_from_gpa(vm, &cv, param, sizeof(cv)) == 0) { if (copy_from_gpa(vm, &cv, param, sizeof(cv)) == 0) {
(void)memset(&vm_config, 0U, sizeof(vm_config)); /* check whether there is a free vm id for use */
vm_config.sworld_supported = ((cv.vm_flag & (SECURE_WORLD_ENABLED)) != 0U); /* TODO: pass vm id from DM to make vm_id static */
(void)memcpy_s(&vm_config.GUID[0], 16U, &cv.GUID[0], 16U); vm_id = find_free_vm_id();
if (vm_id < CONFIG_MAX_VM_NUM) {
vm_config = get_vm_config(vm_id);
/* TODO: set by DM */
vm_config->type = NORMAL_VM;
vm_config->guest_flags |= cv.vm_flag;
vm_config->sworld_supported = ((cv.vm_flag & (SECURE_WORLD_ENABLED)) != 0U);
(void)memcpy_s(&vm_config->GUID[0], 16U, &cv.GUID[0], 16U);
ret = create_vm(&vm_config, &target_vm); ret = create_vm(vm_id, vm_config, &target_vm);
if (ret != 0) { if (ret != 0) {
dev_dbg(ACRN_DBG_HYCALL, "HCALL: Create VM failed"); dev_dbg(ACRN_DBG_HYCALL, "HCALL: Create VM failed");
cv.vmid = ACRN_INVALID_VMID; cv.vmid = ACRN_INVALID_VMID;
ret = -1; ret = -1;
} else { } else {
cv.vmid = target_vm->vm_id; cv.vmid = target_vm->vm_id;
ret = 0; ret = 0;
} }
if (copy_to_gpa(vm, &cv.vmid, param, sizeof(cv.vmid)) != 0) { if (copy_to_gpa(vm, &cv.vmid, param, sizeof(cv.vmid)) != 0) {
pr_err("%s: Unable copy param to vm\n", __func__); pr_err("%s: Unable copy param to vm\n", __func__);
ret = -1; ret = -1;
}
} }
} else { } else {
pr_err("%s: Unable copy param to vm\n", __func__); pr_err("%s: Unable copy param to vm\n", __func__);

View File

@ -83,10 +83,10 @@ enum vpic_wire_mode {
/* Enumerated type for VM states */ /* Enumerated type for VM states */
enum vm_state { enum vm_state {
VM_CREATED = 0, /* VM created / awaiting start (boot) */ VM_STATE_UNKNOWN = 0,
VM_CREATED, /* VM created / awaiting start (boot) */
VM_STARTED, /* VM started (booted) */ VM_STARTED, /* VM started (booted) */
VM_PAUSED, /* VM paused */ VM_PAUSED, /* VM paused */
VM_STATE_UNKNOWN
}; };
struct vm_arch { struct vm_arch {
@ -235,11 +235,6 @@ struct acrn_vm_config {
} __aligned(8); } __aligned(8);
static inline bool is_sos_vm(const struct acrn_vm *vm)
{
return (vm->vm_id) == 0U;
}
/* /*
* @pre vcpu_id < CONFIG_MAX_VCPUS_PER_VM * @pre vcpu_id < CONFIG_MAX_VCPUS_PER_VM
*/ */
@ -310,21 +305,20 @@ void resume_vm(struct acrn_vm *vm);
void resume_vm_from_s3(struct acrn_vm *vm, uint32_t wakeup_vec); void resume_vm_from_s3(struct acrn_vm *vm, uint32_t wakeup_vec);
void start_vm(struct acrn_vm *vm); void start_vm(struct acrn_vm *vm);
int32_t reset_vm(struct acrn_vm *vm); int32_t reset_vm(struct acrn_vm *vm);
int32_t create_vm(struct acrn_vm_config *vm_config, struct acrn_vm **rtn_vm); int32_t create_vm(uint16_t vm_id, struct acrn_vm_config *vm_config, struct acrn_vm **rtn_vm);
int32_t prepare_vm(uint16_t pcpu_id); void prepare_vm(uint16_t vm_id, struct acrn_vm_config *vm_config);
void launch_vms(uint16_t pcpu_id);
extern struct acrn_vm_config vm_configs[]; extern struct acrn_vm_config vm_configs[];
#ifdef CONFIG_PARTITION_MODE bool is_sos_vm(const struct acrn_vm *vm);
const struct vm_config_arraies *get_vm_config_base(void); uint16_t find_free_vm_id(void);
#endif
struct acrn_vm *get_vm_from_vmid(uint16_t vm_id); struct acrn_vm *get_vm_from_vmid(uint16_t vm_id);
#ifdef CONFIG_PARTITION_MODE #ifdef CONFIG_PARTITION_MODE
struct vm_config_arraies { struct vm_config_arraies {
int32_t num_vm_config; int32_t num_vm_config;
struct acrn_vm_config vm_config_array[]; struct acrn_vm_config vm_config_array[CONFIG_MAX_VM_NUM];
}; };
struct pcpu_vm_config_mapping { struct pcpu_vm_config_mapping {
@ -332,7 +326,21 @@ struct pcpu_vm_config_mapping {
bool is_bsp; bool is_bsp;
}; };
extern const struct pcpu_vm_config_mapping pcpu_vm_config_map[]; extern const struct pcpu_vm_config_mapping pcpu_vm_config_map[];
extern struct vm_config_arraies vm_config_partition;
void vrtc_init(struct acrn_vm *vm); void vrtc_init(struct acrn_vm *vm);
#endif #endif
/*
* @pre vm_id < CONFIG_MAX_VM_NUM
*/
static inline struct acrn_vm_config *get_vm_config(uint16_t vm_id)
{
#ifdef CONFIG_PARTITION_MODE
return &vm_config_partition.vm_config_array[vm_id];
#else
return &vm_configs[vm_id];
#endif
}
#endif /* VM_H_ */ #endif /* VM_H_ */

View File

@ -157,10 +157,10 @@ struct vm_config_arraies vm_config_partition = {
/* Virtual Machine descriptions */ /* Virtual Machine descriptions */
.vm_config_array = { .vm_config_array = {
{ {
.type = PRE_LAUNCHED_VM,
/* Internal variable, MUSTBE init to -1 */ /* Internal variable, MUSTBE init to -1 */
.vm_hw_num_cores = VM1_NUM_CPUS, .vm_hw_num_cores = VM1_NUM_CPUS,
.vm_pcpu_ids = &VM1_CPUS[0], .vm_pcpu_ids = &VM1_CPUS[0],
.vm_id = 1U,
.start_hpa = 0x100000000UL, .start_hpa = 0x100000000UL,
.mem_size = 0x20000000UL, /* uses contiguous memory from host */ .mem_size = 0x20000000UL, /* uses contiguous memory from host */
.vm_vuart = true, .vm_vuart = true,
@ -171,10 +171,10 @@ struct vm_config_arraies vm_config_partition = {
}, },
{ {
.type = PRE_LAUNCHED_VM,
/* Internal variable, MUSTBE init to -1 */ /* Internal variable, MUSTBE init to -1 */
.vm_hw_num_cores = VM2_NUM_CPUS, .vm_hw_num_cores = VM2_NUM_CPUS,
.vm_pcpu_ids = &VM2_CPUS[0], .vm_pcpu_ids = &VM2_CPUS[0],
.vm_id = 2U,
.start_hpa = 0x120000000UL, .start_hpa = 0x120000000UL,
.mem_size = 0x20000000UL, /* uses contiguous memory from host */ .mem_size = 0x20000000UL, /* uses contiguous memory from host */
.vm_vuart = true, .vm_vuart = true,

View File

@ -191,10 +191,10 @@ struct vm_config_arraies vm_config_partition = {
/* Virtual Machine descriptions */ /* Virtual Machine descriptions */
.vm_config_array = { .vm_config_array = {
{ {
.type = PRE_LAUNCHED_VM,
/* Internal variable, MUSTBE init to -1 */ /* Internal variable, MUSTBE init to -1 */
.vm_hw_num_cores = VM1_NUM_CPUS, .vm_hw_num_cores = VM1_NUM_CPUS,
.vm_pcpu_ids = &VM1_CPUS[0], .vm_pcpu_ids = &VM1_CPUS[0],
.vm_id = 1U,
.start_hpa = 0x100000000UL, .start_hpa = 0x100000000UL,
.mem_size = 0x80000000UL, /* uses contiguous memory from host */ .mem_size = 0x80000000UL, /* uses contiguous memory from host */
.vm_vuart = true, .vm_vuart = true,
@ -206,10 +206,10 @@ struct vm_config_arraies vm_config_partition = {
}, },
{ {
.type = PRE_LAUNCHED_VM,
/* Internal variable, MUSTBE init to -1 */ /* Internal variable, MUSTBE init to -1 */
.vm_hw_num_cores = VM2_NUM_CPUS, .vm_hw_num_cores = VM2_NUM_CPUS,
.vm_pcpu_ids = &VM2_CPUS[0], .vm_pcpu_ids = &VM2_CPUS[0],
.vm_id = 2U,
.start_hpa = 0x180000000UL, .start_hpa = 0x180000000UL,
.mem_size = 0x80000000UL, /* uses contiguous memory from host */ .mem_size = 0x80000000UL, /* uses contiguous memory from host */
.vm_vuart = true, .vm_vuart = true,