hv: vcpu: Move create_vcpu to common vcpu.c

Move vcpu API create_vcpu to common.

* Break create_vcpu into common vcpu init and arch_init_vcpu
  for arch-specific initialization.
* Move vcpu_thread to arch-specific and rename to arch_vcpu_thread

Tracked-On: #8830
Signed-off-by: Yifan Liu <yifan1.liu@intel.com>
Reviewed-by: Fei Li <fei1.li@intel.com>
Acked-by: Wang Yu1 <yu1.wang@intel.com>
This commit is contained in:
Yifan Liu
2025-08-27 06:03:37 +00:00
committed by acrnsi-robot
parent 688741074f
commit 134c5f6ab7
8 changed files with 267 additions and 230 deletions

View File

@@ -127,6 +127,8 @@ ifneq ($(CONFIG_RELEASE),y)
CFLAGS += -DHV_DEBUG -DPROFILING_ON -fno-omit-frame-pointer
endif
COMMON_C_SRCS += common/vcpu.c
# FIXME: During initial development stage of riscv enabling,
# we would like to first confine the common files to x86-only.
# As we progress through the riscv enabling process, multi-arch

View File

@@ -23,11 +23,8 @@
#include <asm/lapic.h>
#include <asm/irq.h>
#include <console.h>
bool is_vcpu_bsp(const struct acrn_vcpu *vcpu)
{
	/* The bootstrap processor (BSP) is the vCPU whose ID equals BSP_CPU_ID. */
	bool bsp;

	bsp = (vcpu->vcpu_id == BSP_CPU_ID);

	return bsp;
}
#include <trace.h>
#include <asm/guest/vmexit.h>
enum vm_cpu_mode get_vcpu_mode(const struct acrn_vcpu *vcpu)
{
@@ -529,139 +526,67 @@ void set_vcpu_startup_entry(struct acrn_vcpu *vcpu, uint64_t entry)
vcpu_set_rip(vcpu, 0UL);
}
/*
* @pre vm != NULL && rtn_vcpu_handle != NULL
*/
int32_t create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn_vcpu_handle)
int32_t arch_init_vcpu(struct acrn_vcpu *vcpu)
{
struct acrn_vcpu *vcpu;
uint16_t vcpu_id;
int32_t ret;
struct acrn_vm *vm = vcpu->vm;
uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);
pr_info("Creating VCPU working on PCPU%hu", pcpu_id);
if (is_lapic_pt_configured(vm) || is_using_init_ipi()) {
/* Lapic_pt pCPU does not enable irq in root mode. So it
* should be set to PAUSE idle mode.
* At this point the pCPU is possibly in HLT idle. And the
* kick mode is to be set to INIT kick, which will not be
* able to wake root mode HLT. So a kick(if pCPU is in HLT
* idle, the kick mode is certainly ipi kick) will change
* it to PAUSE idle right away.
*/
if (per_cpu(arch.idle_mode, pcpu_id) == IDLE_MODE_HLT) {
per_cpu(arch.idle_mode, pcpu_id) = IDLE_MODE_PAUSE;
kick_pcpu(pcpu_id);
}
per_cpu(arch.kick_pcpu_mode, pcpu_id) = DEL_MODE_INIT;
} else {
per_cpu(arch.kick_pcpu_mode, pcpu_id) = DEL_MODE_IPI;
per_cpu(arch.idle_mode, pcpu_id) = IDLE_MODE_HLT;
}
pr_info("pcpu=%d, kick-mode=%d, use_init_flag=%d", pcpu_id,
per_cpu(arch.kick_pcpu_mode, pcpu_id), is_using_init_ipi());
/*
* vcpu->vcpu_id = vm->hw.created_vcpus;
* vm->hw.created_vcpus++;
* If the logical processor is in VMX non-root operation and
* the "enable VPID" VM-execution control is 1, the current VPID
* is the value of the VPID VM-execution control field in the VMCS.
*
* This assignment guarantees a unique non-zero per vcpu vpid at runtime.
*/
vcpu_id = vm->hw.created_vcpus;
if (vcpu_id < MAX_VCPUS_PER_VM) {
/* Allocate memory for VCPU */
vcpu = &(vm->hw.vcpu_array[vcpu_id]);
(void)memset((void *)vcpu, 0U, sizeof(struct acrn_vcpu));
vcpu->arch.vpid = ALLOCATED_MIN_L1_VPID + (vm->vm_id * MAX_VCPUS_PER_VM) + vcpu->vcpu_id;
/* Initialize CPU ID for this VCPU */
vcpu->vcpu_id = vcpu_id;
per_cpu(ever_run_vcpu, pcpu_id) = vcpu;
/*
* Use vm_id as the index to indicate the posted interrupt IRQ/vector pair that are
* assigned to this vCPU:
* 0: first posted interrupt IRQs/vector pair (POSTED_INTR_IRQ/POSTED_INTR_VECTOR)
* ...
* CONFIG_MAX_VM_NUM-1: last posted interrupt IRQs/vector pair
* ((POSTED_INTR_IRQ + CONFIG_MAX_VM_NUM - 1U)/(POSTED_INTR_VECTOR + CONFIG_MAX_VM_NUM - 1U)
*/
vcpu->arch.pid.control.bits.nv = POSTED_INTR_VECTOR + vm->vm_id;
if (is_lapic_pt_configured(vm) || is_using_init_ipi()) {
/* Lapic_pt pCPU does not enable irq in root mode. So it
* should be set to PAUSE idle mode.
* At this point the pCPU is possibly in HLT idle. And the
* kick mode is to be set to INIT kick, which will not be
* able to wake root mode HLT. So a kick(if pCPU is in HLT
* idle, the kick mode is certainly ipi kick) will change
* it to PAUSE idle right away.
*/
if (per_cpu(arch.idle_mode, pcpu_id) == IDLE_MODE_HLT) {
per_cpu(arch.idle_mode, pcpu_id) = IDLE_MODE_PAUSE;
kick_pcpu(pcpu_id);
}
per_cpu(arch.kick_pcpu_mode, pcpu_id) = DEL_MODE_INIT;
} else {
per_cpu(arch.kick_pcpu_mode, pcpu_id) = DEL_MODE_IPI;
per_cpu(arch.idle_mode, pcpu_id) = IDLE_MODE_HLT;
}
pr_info("pcpu=%d, kick-mode=%d, use_init_flag=%d", pcpu_id,
per_cpu(arch.kick_pcpu_mode, pcpu_id), is_using_init_ipi());
/* ACRN does not support vCPU migration, one vCPU always runs on
* the same pCPU, so PI's ndst is never changed after startup.
*/
vcpu->arch.pid.control.bits.ndst = per_cpu(arch.lapic_id, pcpu_id);
/* Initialize the parent VM reference */
vcpu->vm = vm;
/* Create per vcpu vlapic */
vlapic_create(vcpu, pcpu_id);
/* Initialize the virtual ID for this VCPU */
/* FIXME:
* We have assumption that we always destroys vcpus in one
* shot (like when vm is destroyed). If we need to support
* specific vcpu destroy on fly, this vcpu_id assignment
* needs revise.
*/
pr_info("Create VM%d-VCPU%d, Role: %s",
vcpu->vm->vm_id, vcpu->vcpu_id,
is_vcpu_bsp(vcpu) ? "PRIMARY" : "SECONDARY");
/*
* If the logical processor is in VMX non-root operation and
* the "enable VPID" VM-execution control is 1, the current VPID
* is the value of the VPID VM-execution control field in the VMCS.
*
* This assignment guarantees a unique non-zero per vcpu vpid at runtime.
*/
vcpu->arch.vpid = ALLOCATED_MIN_L1_VPID + (vm->vm_id * MAX_VCPUS_PER_VM) + vcpu->vcpu_id;
/*
* There are two locally independent writing operations, namely the
* assignment of vcpu->vm and vcpu_array[]. Compilers may optimize
* and reorder writing operations while users of vcpu_array[] may
* assume the presence of vcpu->vm. A compiler barrier is added here
* to prevent compiler reordering, ensuring that assignments to
* vcpu->vm precede vcpu_array[].
*/
cpu_compiler_barrier();
/*
* ACRN uses the following approach to manage VT-d PI notification vectors:
* Allocate unique Activation Notification Vectors (ANV) for each vCPU that
* belongs to the same pCPU, the ANVs need only be unique within each pCPU,
* not across all vCPUs. The max numbers of vCPUs may be running on top of
* a pCPU is CONFIG_MAX_VM_NUM, since ACRN does not support 2 vCPUs of same
* VM running on top of same pCPU. This reduces # of pre-allocated ANVs for
* posted interrupts to CONFIG_MAX_VM_NUM, and enables ACRN to avoid switching
* between active and wake-up vector values in the posted interrupt descriptor
* on vCPU scheduling state changes.
*
* We maintain a per-pCPU array of vCPUs, and use vm_id as the index to the
* vCPU array
*/
per_cpu(vcpu_array, pcpu_id)[vm->vm_id] = vcpu;
/*
* Use vm_id as the index to indicate the posted interrupt IRQ/vector pair that are
* assigned to this vCPU:
* 0: first posted interrupt IRQs/vector pair (POSTED_INTR_IRQ/POSTED_INTR_VECTOR)
* ...
* CONFIG_MAX_VM_NUM-1: last posted interrupt IRQs/vector pair
* ((POSTED_INTR_IRQ + CONFIG_MAX_VM_NUM - 1U)/(POSTED_INTR_VECTOR + CONFIG_MAX_VM_NUM - 1U)
*/
vcpu->arch.pid.control.bits.nv = POSTED_INTR_VECTOR + vm->vm_id;
/* ACRN does not support vCPU migration, one vCPU always runs on
* the same pCPU, so PI's ndst is never changed after startup.
*/
vcpu->arch.pid.control.bits.ndst = per_cpu(arch.lapic_id, pcpu_id);
/* Create per vcpu vlapic */
vlapic_create(vcpu, pcpu_id);
if (!vm_hide_mtrr(vm)) {
init_vmtrr(vcpu);
}
/* Populate the return handle */
*rtn_vcpu_handle = vcpu;
vcpu_set_state(vcpu, VCPU_INIT);
init_xsave(vcpu);
vcpu_reset_internal(vcpu, POWER_ON_RESET);
(void)memset((void *)&vcpu->req, 0U, sizeof(struct io_request));
vm->hw.created_vcpus++;
ret = 0;
} else {
pr_err("%s, vcpu id is invalid!\n", __func__);
ret = -EINVAL;
if (!vm_hide_mtrr(vm)) {
init_vmtrr(vcpu);
}
return ret;
init_xsave(vcpu);
vcpu_reset_internal(vcpu, POWER_ON_RESET);
return 0;
}
/**
@@ -854,7 +779,7 @@ void kick_vcpu(struct acrn_vcpu *vcpu)
/*
* @pre (&vcpu->stack[CONFIG_STACK_SIZE] & (CPU_STACK_ALIGN - 1UL)) == 0
*/
static uint64_t build_stack_frame(struct acrn_vcpu *vcpu)
uint64_t arch_build_stack_frame(struct acrn_vcpu *vcpu)
{
uint64_t stacktop = (uint64_t)&vcpu->stack[CONFIG_STACK_SIZE];
struct stack_frame *frame;
@@ -948,7 +873,7 @@ void rstore_xsave_area(const struct acrn_vcpu *vcpu, const struct ext_context *e
* will call them every thread switch. We can implement lazy context swtich , which
* only do context swtich when really need.
*/
static void context_switch_out(struct thread_object *prev)
void arch_context_switch_out(struct thread_object *prev)
{
struct acrn_vcpu *vcpu = container_of(prev, struct acrn_vcpu, thread_obj);
struct ext_context *ectx = &(vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx);
@@ -964,7 +889,7 @@ static void context_switch_out(struct thread_object *prev)
save_xsave_area(vcpu, ectx);
}
static void context_switch_in(struct thread_object *next)
void arch_context_switch_in(struct thread_object *next)
{
struct acrn_vcpu *vcpu = container_of(next, struct acrn_vcpu, thread_obj);
struct ext_context *ectx = &(vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx);
@@ -1006,32 +931,6 @@ void launch_vcpu(struct acrn_vcpu *vcpu)
}
/* Helper for vCPU creation: creates the vCPU, then wires up its scheduler
 * thread object (name, entry point, stack, context-switch hooks) and events.
 */
int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
{
	struct acrn_vcpu *new_vcpu = NULL;
	struct thread_object *tobj;
	char name_buf[16];
	int32_t err, idx;

	err = create_vcpu(pcpu_id, vm, &new_vcpu);
	if (err != 0) {
		return err;
	}

	tobj = &new_vcpu->thread_obj;

	/* Thread name follows the "vm<id>:vcpu<id>" convention. */
	snprintf(name_buf, 16U, "vm%hu:vcpu%hu", vm->vm_id, new_vcpu->vcpu_id);
	(void)strncpy_s(tobj->name, 16U, name_buf, 16U);

	tobj->pcpu_id = pcpu_id;
	tobj->sched_ctl = &per_cpu(sched_ctl, pcpu_id);
	tobj->thread_entry = vcpu_thread;
	tobj->host_sp = build_stack_frame(new_vcpu);
	tobj->switch_out = context_switch_out;
	tobj->switch_in = context_switch_in;
	init_thread_data(tobj, &get_vm_config(vm->vm_id)->sched_params);

	for (idx = 0; idx < VCPU_EVENT_NUM; idx++) {
		init_event(&new_vcpu->events[idx]);
	}

	return err;
}
/**
* @pre vcpu != NULL
*/
@@ -1104,3 +1003,65 @@ void vcpu_set_state(struct acrn_vcpu *vcpu, enum vcpu_state new_state)
vcpu->state = new_state;
update_vm_vlapic_state(vcpu->vm);
}
/* TODO: Our goal is to have a vcpu_thread that is common.
* Leave this to future optimization.
*/
/*
 * Per-vCPU thread body: loops forever doing reschedule checks, pending-request
 * handling, VM entry (run_vcpu), and VM-exit dispatch. Fatal errors zombie the
 * vCPU and keep looping so the scheduler can switch away.
 */
void arch_vcpu_thread(struct thread_object *obj)
{
	struct acrn_vcpu *vcpu = container_of(obj, struct acrn_vcpu, thread_obj);
	int32_t ret = 0;

	do {
		/* With LAPIC pass-through the guest owns interrupt delivery,
		 * so root-mode irqs stay enabled; otherwise disable them here.
		 */
		if (!is_lapic_pt_enabled(vcpu)) {
			local_irq_disable();
		}

		/* Don't open interrupt window between here and vmentry */
		if (need_reschedule(pcpuid_from_vcpu(vcpu))) {
			schedule();
		}

		/* Check and process pending requests(including interrupt) */
		ret = acrn_handle_pending_request(vcpu);
		if (ret < 0) {
			pr_fatal("vcpu handling pending request fail");
			get_vm_lock(vcpu->vm);
			zombie_vcpu(vcpu, VCPU_ZOMBIE);
			put_vm_lock(vcpu->vm);
			/* Fatal error happened (triple fault). Stop the vcpu running. */
			continue;
		}

		reset_event(&vcpu->events[VCPU_EVENT_VIRTUAL_INTERRUPT]);
		profiling_vmenter_handler(vcpu);

		TRACE_2L(TRACE_VM_ENTER, 0UL, 0UL);
		ret = run_vcpu(vcpu);
		if (ret != 0) {
			pr_fatal("vcpu resume failed");
			get_vm_lock(vcpu->vm);
			zombie_vcpu(vcpu, VCPU_ZOMBIE);
			put_vm_lock(vcpu->vm);
			/* Fatal error happened (resume vcpu failed). Stop the vcpu running. */
			continue;
		}
		TRACE_2L(TRACE_VM_EXIT, vcpu->arch.exit_reason, vcpu_get_rip(vcpu));
		profiling_pre_vmexit_handler(vcpu);

		/* Back in root mode after a VM exit; re-enable irqs if we disabled them. */
		if (!is_lapic_pt_enabled(vcpu)) {
			local_irq_enable();
		}
		/* Dispatch handler */
		ret = vmexit_handler(vcpu);
		if (ret < 0) {
			pr_fatal("dispatch VM exit handler failed for reason"
				" %d, ret = %d!", vcpu->arch.exit_reason, ret);
			/* Unhandled exit: inject #GP into the guest and keep running. */
			vcpu_inject_gp(vcpu, 0U);
			continue;
		}

		profiling_post_vmexit_handler(vcpu);
	} while (1);
}

View File

@@ -39,6 +39,7 @@
#include <asm/rtcm.h>
#include <asm/irq.h>
#include <uart16550.h>
#include <vcpu.h>
#ifdef CONFIG_SECURITY_VM_FIXUP
#include <quirks/security_vm_fixup.h>
#endif
@@ -833,7 +834,7 @@ int32_t create_vm(uint16_t vm_id, uint64_t pcpu_bitmap, struct acrn_vm_config *v
while (tmp64 != 0UL) {
pcpu_id = ffs64(tmp64);
bitmap_clear_non_atomic(pcpu_id, &tmp64);
status = prepare_vcpu(vm, pcpu_id);
status = create_vcpu(vm, pcpu_id);
if (status != 0) {
break;
}

View File

@@ -15,62 +15,3 @@
#include <trace.h>
#include <logmsg.h>
#include <per_cpu.h>
/*
 * Per-vCPU thread body: loops forever doing reschedule checks, pending-request
 * handling, VM entry (run_vcpu), and VM-exit dispatch. Fatal errors zombie the
 * vCPU and keep looping so the scheduler can switch away.
 */
void vcpu_thread(struct thread_object *obj)
{
	struct acrn_vcpu *vcpu = container_of(obj, struct acrn_vcpu, thread_obj);
	int32_t ret = 0;

	do {
		/* With LAPIC pass-through the guest owns interrupt delivery,
		 * so root-mode irqs stay enabled; otherwise disable them here.
		 */
		if (!is_lapic_pt_enabled(vcpu)) {
			local_irq_disable();
		}

		/* Don't open interrupt window between here and vmentry */
		if (need_reschedule(pcpuid_from_vcpu(vcpu))) {
			schedule();
		}

		/* Check and process pending requests(including interrupt) */
		ret = acrn_handle_pending_request(vcpu);
		if (ret < 0) {
			pr_fatal("vcpu handling pending request fail");
			get_vm_lock(vcpu->vm);
			zombie_vcpu(vcpu, VCPU_ZOMBIE);
			put_vm_lock(vcpu->vm);
			/* Fatal error happened (triple fault). Stop the vcpu running. */
			continue;
		}

		reset_event(&vcpu->events[VCPU_EVENT_VIRTUAL_INTERRUPT]);
		profiling_vmenter_handler(vcpu);

		TRACE_2L(TRACE_VM_ENTER, 0UL, 0UL);
		ret = run_vcpu(vcpu);
		if (ret != 0) {
			pr_fatal("vcpu resume failed");
			get_vm_lock(vcpu->vm);
			zombie_vcpu(vcpu, VCPU_ZOMBIE);
			put_vm_lock(vcpu->vm);
			/* Fatal error happened (resume vcpu failed). Stop the vcpu running. */
			continue;
		}
		TRACE_2L(TRACE_VM_EXIT, vcpu->arch.exit_reason, vcpu_get_rip(vcpu));
		profiling_pre_vmexit_handler(vcpu);

		/* Back in root mode after a VM exit; re-enable irqs if we disabled them. */
		if (!is_lapic_pt_enabled(vcpu)) {
			local_irq_enable();
		}
		/* Dispatch handler */
		ret = vmexit_handler(vcpu);
		if (ret < 0) {
			pr_fatal("dispatch VM exit handler failed for reason"
				" %d, ret = %d!", vcpu->arch.exit_reason, ret);
			/* Unhandled exit: inject #GP into the guest and keep running. */
			vcpu_inject_gp(vcpu, 0U);
			continue;
		}

		profiling_post_vmexit_handler(vcpu);
	} while (1);
}

119
hypervisor/common/vcpu.c Normal file
View File

@@ -0,0 +1,119 @@
/*
* Copyright (C) 2018-2025 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <vcpu.h>
#include <vm.h>
#include <errno.h>
#include <per_cpu.h>
#include <sprintf.h>
#include <logmsg.h>
#include <schedule.h>
bool is_vcpu_bsp(const struct acrn_vcpu *vcpu)
{
	/* A vCPU is the bootstrap processor exactly when its ID is BSP_CPU_ID. */
	return (BSP_CPU_ID == vcpu->vcpu_id);
}
/* Bind the vCPU's scheduler thread object to its target pCPU: set its name,
 * scheduler control, arch entry point, initial stack, and switch hooks.
 */
static void init_vcpu_thread(struct acrn_vcpu *vcpu, uint16_t pcpu_id)
{
	struct acrn_vm *owner = vcpu->vm;
	struct thread_object *tobj = &vcpu->thread_obj;
	char name[16];

	/* Thread name follows the "vm<id>:vcpu<id>" convention. */
	snprintf(name, 16U, "vm%hu:vcpu%hu", owner->vm_id, vcpu->vcpu_id);
	(void)strncpy_s(tobj->name, 16U, name, 16U);

	tobj->pcpu_id = pcpu_id;
	tobj->sched_ctl = &per_cpu(sched_ctl, pcpu_id);
	tobj->thread_entry = arch_vcpu_thread;
	tobj->host_sp = arch_build_stack_frame(vcpu);
	tobj->switch_out = arch_context_switch_out;
	tobj->switch_in = arch_context_switch_in;

	init_thread_data(tobj, &get_vm_config(owner->vm_id)->sched_params);
}
/**
* @brief create a vcpu for the target vm
*
* Creates/allocates a vCPU instance, with initialization for its vcpu_id,
* vpid, vmcs, vlapic, etc. It sets the init vCPU state to VCPU_INIT
*
* The call has the following assumption:
* - The caller is responsible to lock-protect this call
* - We don't support having more than one vCPUs of the same VM
* on the same pCPU
*
 * @param[in] vm pointer to vm data structure; the created vcpu will be owned by this vm
 * @param[in] pcpu_id the physical cpu that the created vcpu will run on
*
* @retval 0 vcpu created successfully, other values failed.
*/
int32_t create_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
{
	struct acrn_vcpu *vcpu;
	uint16_t vcpu_id;
	int32_t i, ret;

	pr_info("Creating VCPU on PCPU%hu", pcpu_id);

	/*
	 * vCPU IDs are allocated sequentially; the next free slot is:
	 * vcpu->vcpu_id = vm->hw.created_vcpus;
	 * vm->hw.created_vcpus++;
	 */
	vcpu_id = vm->hw.created_vcpus;
	if (vcpu_id < MAX_VCPUS_PER_VM) {
		/* vCPU storage is statically reserved in the VM; no dynamic allocation. */
		vcpu = &(vm->hw.vcpu_array[vcpu_id]);
		(void)memset((void *)vcpu, 0U, sizeof(struct acrn_vcpu));

		/* Initialize CPU ID for this VCPU */
		vcpu->vcpu_id = vcpu_id;
		per_cpu(ever_run_vcpu, pcpu_id) = vcpu;

		/* Initialize the parent VM reference */
		vcpu->vm = vm;

		/* FIXME:
		 * We have assumption that we always destroys vcpus in one
		 * shot (like when vm is destroyed). If we need to support
		 * specific vcpu destroy on fly, this vcpu_id assignment
		 * needs revise.
		 */
		pr_info("Create VM%d-VCPU%d, Role: %s",
			vcpu->vm->vm_id, vcpu->vcpu_id,
			is_vcpu_bsp(vcpu) ? "PRIMARY" : "SECONDARY");

		/*
		 * Prevent the compiler from reordering the vcpu->vm assignment
		 * above past the publication of this vCPU in per-cpu vcpu_array
		 * below; readers of vcpu_array assume vcpu->vm is already valid.
		 */
		cpu_compiler_barrier();

		/*
		 * We maintain a per-pCPU array of vCPUs, and use vm_id as the index to the
		 * vCPU array
		 */
		per_cpu(vcpu_array, pcpu_id)[vm->vm_id] = vcpu;

		(void)memset((void *)&vcpu->req, 0U, sizeof(struct io_request));
		vm->hw.created_vcpus++;

		/* pcpuid_from_vcpu works after this call */
		init_vcpu_thread(vcpu, pcpu_id);

		/* init event */
		for (i = 0; i < MAX_VCPU_EVENT_NUM; i++) {
			init_event(&vcpu->events[i]);
		}

		/* Arch-specific setup (vpid, vlapic, xsave, reset, ...). */
		ret = arch_init_vcpu(vcpu);
		if (ret == 0) {
			vcpu->state = VCPU_INIT;
		}
	} else {
		pr_err("%s, vcpu id is invalid!\n", __func__);
		ret = -EINVAL;
	}

	return ret;
}

View File

@@ -19,4 +19,13 @@ enum os_kernel_type {
DUMMY,
};
/* TODO: Dummy, to be removed */
#include <schedule.h>
/* Minimal stand-in for the real per-VM configuration structure. */
struct acrn_vm_config {
	struct sched_params sched_params; /* Scheduler params for vCPUs of this VM */
};

/*
 * Dummy accessor so common code compiles before the real per-VM config
 * lands for this architecture.
 * NOTE(review): returns NULL, so any caller that uses the result (e.g.
 * &get_vm_config(id)->sched_params) depends on this path not being
 * exercised yet — confirm before enabling vCPU creation here.
 */
static inline struct acrn_vm_config *get_vm_config(__unused uint16_t vm_id) {
	return NULL;
}
#endif /* VM_CONFIG_H_ */

View File

@@ -536,20 +536,6 @@ void save_xsave_area(struct acrn_vcpu *vcpu, struct ext_context *ectx);
void rstore_xsave_area(const struct acrn_vcpu *vcpu, const struct ext_context *ectx);
void load_iwkey(struct acrn_vcpu *vcpu);
/**
* @brief create a vcpu for the target vm
*
* Creates/allocates a vCPU instance, with initialization for its vcpu_id,
* vpid, vmcs, vlapic, etc. It sets the init vCPU state to VCPU_INIT
*
* @param[in] pcpu_id created vcpu will run on this pcpu
* @param[in] vm pointer to vm data structure, this vcpu will owned by this vm.
* @param[out] rtn_vcpu_handle pointer to the created vcpu
*
* @retval 0 vcpu created successfully, other values failed.
*/
int32_t create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn_vcpu_handle);
/**
* @brief run into non-root mode based on vcpu setting
*

View File

@@ -88,6 +88,24 @@ struct guest_mem_dump {
};
uint16_t pcpuid_from_vcpu(const struct acrn_vcpu *vcpu);
int32_t arch_init_vcpu(struct acrn_vcpu *vcpu);
void arch_vcpu_thread(struct thread_object *obj);
void arch_context_switch_out(struct thread_object *prev);
void arch_context_switch_in(struct thread_object *next);
uint64_t arch_build_stack_frame(struct acrn_vcpu *vcpu);
/**
* @brief create a vcpu for the target vm
*
* Creates/allocates and initialize a vCPU instance.
*
 * @param[in] vm pointer to vm data structure; the created vcpu will be owned by this vm
 * @param[in] pcpu_id the physical cpu that the created vcpu will run on
*
* @retval 0 vcpu created successfully, other values failed.
*/
int32_t create_vcpu(struct acrn_vm *vm, uint16_t pcpu_id);
/**
* @}