acrn-hypervisor/hypervisor/common/vm.c
Yifan Liu a4f136188c hv: vm: Move reset_vm to common scope
Move reset_vm to common scope and remove unused reset_mode.

The reset_mode in the x86 reset_vm code is only used as the condition of
an if statement that decides whether prepare_os_image should be executed.
That condition can never be true, because resetting the Service VM
without resetting the ACRN hypervisor is not supported; the only way to
reset the Service VM is a platform reset. Therefore the prepare_os_image
action will never be called. Delete this action.

Once the if condition and the prepare_os_image action are deleted, the
input parameter "mode" becomes useless. Delete it too. The reset_vm API
in ACRN is simply a "warm reset"; it does not need any extra input.
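
As a rough sketch (illustrative only; the old parameter name and its
enum type are assumptions, not quoted from the removed x86 code), the
API shrinks from a mode-taking reset to a plain warm reset:

    /* before (x86-specific): mode only guarded prepare_os_image */
    int32_t reset_vm(struct acrn_vm *vm, enum reset_mode mode);

    /* after (common): warm reset only, no extra input */
    int32_t reset_vm(struct acrn_vm *vm);

A caller would now simply invoke reset_vm(vm) on a paused VM (per the
@pre in the new code) and restart it afterwards with start_vm().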

Tracked-On: #8830
Signed-off-by: Yifan Liu <yifan1.liu@intel.com>
Reviewed-by: Fei Li <fei1.li@intel.com>
Acked-by: Wang Yu1 <yu1.wang@intel.com>
2025-10-30 13:30:32 +08:00

/*
 * Copyright (C) 2018-2025 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <vcpu.h>
#include <vm.h>
#include <logmsg.h>
#include <sbuf.h>
#include <sprintf.h>
#include <asm/host_pm.h>

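/* All VM structures, indexed by VM ID. service_vm_ptr caches the Service
 * VM's entry and is set in launch_vms(). */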
static struct acrn_vm vm_array[CONFIG_MAX_VM_NUM] __aligned(PAGE_SIZE);
static struct acrn_vm *service_vm_ptr = NULL;

/**
 * @pre vm != NULL
 */
bool is_paused_vm(const struct acrn_vm *vm)
{
	return (vm->state == VM_PAUSED);
}

/**
 * @pre vm_config != NULL
 */
static inline uint16_t get_configured_bsp_pcpu_id(const struct acrn_vm_config *vm_config)
{
	/*
	 * The set least significant bit represents the pCPU ID for the BSP;
	 * vm_config->cpu_affinity has been sanitized to contain valid pCPU IDs.
	 */
	return ffs64(vm_config->cpu_affinity);
}

/**
 * return a pointer to the virtual machine structure associated with
 * this VM ID
 *
 * @pre vm_id < CONFIG_MAX_VM_NUM
 */
struct acrn_vm *get_vm_from_vmid(uint16_t vm_id)
{
	return &vm_array[vm_id];
}

/* return a pointer to the virtual machine structure of Service VM */
struct acrn_vm *get_service_vm(void)
{
	ASSERT(service_vm_ptr != NULL, "service_vm_ptr is NULL");

	return service_vm_ptr;
}

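/*
 * Return true only if every stateful VM has been powered off, i.e. the
 * platform is ready for a system-level shutdown.
 */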
bool is_ready_for_system_shutdown(void)
{
	bool ret = true;
	uint16_t vm_id;
	struct acrn_vm *vm;

	for (vm_id = 0U; vm_id < CONFIG_MAX_VM_NUM; vm_id++) {
		vm = get_vm_from_vmid(vm_id);
		/* TODO: Update code to cover hybrid mode */
		if (!is_poweroff_vm(vm) && is_stateful_vm(vm)) {
			ret = false;
			break;
		}
	}

	return ret;
}

/**
 * @pre vm_config != NULL
 * @Application constraint: The validity of vm_config->cpu_affinity should be guaranteed before run-time.
 */
void launch_vms(uint16_t pcpu_id)
{
	uint16_t vm_id;
	struct acrn_vm *vm;
	struct acrn_vm_config *vm_config;

	for (vm_id = 0U; vm_id < CONFIG_MAX_VM_NUM; vm_id++) {
		vm_config = get_vm_config(vm_id);

		if (((vm_config->guest_flags & GUEST_FLAG_REE) != 0U) &&
		    ((vm_config->guest_flags & GUEST_FLAG_TEE) != 0U)) {
			ASSERT(false, "%s: Wrong VM (VM id: %u) configuration, can't set both REE and TEE flags",
				__func__, vm_id);
		}

		if ((vm_config->load_order == SERVICE_VM) || (vm_config->load_order == PRE_LAUNCHED_VM)) {
			if (pcpu_id == get_configured_bsp_pcpu_id(vm_config)) {
				if (vm_config->load_order == SERVICE_VM) {
					service_vm_ptr = &vm_array[vm_id];
				}

				/*
				 * We can only start a VM when there is no error in prepare_vm.
				 * Otherwise, print out the corresponding error.
				 *
				 * We can only start the REE VM after getting the notification
				 * from the TEE VM, so skip "start_vm" here for REE and start it
				 * in the TEE hypercall HC_TEE_VCPU_BOOT_DONE.
				 */
				if (create_vm(vm_id, vm_config->cpu_affinity, vm_config, &vm) == 0) {
					if ((vm_config->guest_flags & GUEST_FLAG_REE) != 0U) {
						/* Nothing to do here; the REE VM is started from the TEE hypercall */
					} else {
						if (prepare_os_image(vm) == 0) {
							start_vm(vm);
							pr_acrnlog("Start VM id: %x name: %s", vm_id, vm_config->name);
						}
					}
				}
			}
		}
	}
}

/**
 * @pre vm != NULL
 * @pre vm->state == VM_CREATED
 */
void start_vm(struct acrn_vm *vm)
{
	struct acrn_vcpu *vcpu = vcpu_from_vid(vm, BSP_CPU_ID);

	arch_vm_prepare_bsp(vcpu);
	launch_vcpu(vcpu);
	vm->state = VM_RUNNING;
}

/**
 * @pre vm != NULL
 */
void pause_vm(struct acrn_vm *vm)
{
	uint16_t i;
	struct acrn_vcpu *vcpu = NULL;

	if (((is_severity_pass(vm->vm_id)) && (vm->state == VM_RUNNING)) ||
	    (vm->state == VM_READY_TO_POWEROFF) ||
	    (vm->state == VM_CREATED)) {
		foreach_vcpu(i, vm, vcpu) {
			zombie_vcpu(vcpu);
		}
		vm->state = VM_PAUSED;
	}
}

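/*
 * Tear down a (paused) VM: deinitialize arch-specific state, destroy all
 * of its vCPUs and clear the DM-owned guest flags (and, for a dynamically
 * configured VM, its name) so the slot can be reused.
 */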
int32_t destroy_vm(struct acrn_vm *vm)
{
	int32_t ret = 0;
	uint16_t i;
	struct acrn_vm_config *vm_config = NULL;
	struct acrn_vcpu *vcpu = NULL;

	/* Only a paused VM is allowed to be destroyed */
	vm->state = VM_POWERED_OFF;

	if (is_service_vm(vm)) {
		sbuf_reset();
	}

	/* TODO: As with create_vm, some common module de-initialization
	 * logic still lives inside arch_deinit_vm. */
	ret = arch_deinit_vm(vm);

	foreach_vcpu(i, vm, vcpu) {
		destroy_vcpu(vcpu);
	}

	/* Clear the DM-owned guest_flags now that they are no longer used */
	vm_config = get_vm_config(vm->vm_id);
	vm_config->guest_flags &= ~DM_OWNED_GUEST_FLAG_MASK;

	if (!is_static_configured_vm(vm)) {
		memset(vm_config->name, 0U, MAX_VM_NAME_LEN);
	}

	/* Return status to caller */
	return ret;
}

/**
 * @pre vm_id < CONFIG_MAX_VM_NUM && vm_config != NULL && rtn_vm != NULL
 * @pre vm->state == VM_POWERED_OFF
 */
int32_t create_vm(uint16_t vm_id, uint64_t pcpu_bitmap, struct acrn_vm_config *vm_config, struct acrn_vm **rtn_vm)
{
	int32_t status = 0;
	uint16_t pcpu_id;
	struct acrn_vm *vm = NULL;

	/* Allocate memory for virtual machine */
	vm = &vm_array[vm_id];
	vm->vm_id = vm_id;
	vm->hw.created_vcpus = 0U;

	if (vm_config->name[0] == '\0') {
		/* If the VM name is not configured, derive one from the VM ID */
		snprintf(vm_config->name, 16, "ACRN VM_%d", vm_id);
	}
	(void)memcpy_s(&vm->name[0], MAX_VM_NAME_LEN, &vm_config->name[0], MAX_VM_NAME_LEN);

	vm->sw.vm_event_sbuf = NULL;
	vm->sw.io_shared_page = NULL;
	vm->sw.asyncio_sbuf = NULL;
	if ((vm_config->load_order == POST_LAUNCHED_VM)
	    && ((vm_config->guest_flags & GUEST_FLAG_IO_COMPLETION_POLLING) != 0U)) {
		/* Enable IO completion polling mode per the guest flags in vm_config. */
		vm->sw.is_polling_ioreq = true;
	}

	spinlock_init(&vm->stg2pt_lock);
	spinlock_init(&vm->emul_mmio_lock);
	vm->nr_emul_mmio_regions = 0U;

	/* TODO: Some logic inside arch_init_vm can also be moved to common, but
	 * we didn't come up with an abstraction good enough to capture the
	 * dependencies. Leave it inside arch for now. */
	status = arch_init_vm(vm, vm_config);
	if (status == 0) {
		/* We have assumptions:
		 * 1) vCPUs used by the Service VM have been offlined by the DM before a User VM re-uses them.
		 * 2) pcpu_bitmap passed sanitization and is OK for vCPU creation.
		 */
		vm->hw.cpu_affinity = pcpu_bitmap;

		/* Create one vCPU on each pCPU set in the affinity bitmap */
		uint64_t tmp64 = pcpu_bitmap;
		while (tmp64 != 0UL) {
			pcpu_id = ffs64(tmp64);
			bitmap_clear_non_atomic(pcpu_id, &tmp64);
			status = create_vcpu(vm, pcpu_id);
			if (status != 0) {
				break;
			}
		}
	}

	if (status == 0) {
		vm->state = VM_CREATED;

		/* Populate return VM handle */
		*rtn_vm = vm;
	}

	return status;
}

/**
 * "Warm" reset a VM.
 * To "cold" reset a VM, simply destroy and re-create it.
 *
 * @pre vm->state == VM_PAUSED
 */
int32_t reset_vm(struct acrn_vm *vm)
{
	int32_t ret = -1;

	ret = arch_reset_vm(vm);
	if (ret == 0) {
		vm->state = VM_CREATED;
	}

	return ret;
}