hv: vcpu: Move vcpu common parts under common/vcpu.h (data structure)

This commit cleans up struct acrn_vcpu. The vcpu API cleanup will follow in a
future patch series.

Create a common vcpu.h hosting struct acrn_vcpu, and move some x86-specific
members of struct acrn_vcpu into struct acrn_vcpu_arch. These members
include:

reg_cached
reg_updated
inst_ctxt

In the opposite direction, pending_req is moved from arch to common.

The macro for the maximum number of events, VCPU_EVENT_NUM, is replaced by
MAX_VCPU_EVENT_NUM.

To avoid a circular dependency, some in-header helpers are moved into vcpu.c,
with only their prototypes kept in the header. The resulting layout is
sketched below.
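
As an abbreviated sketch (only the moved members are shown; the complete
definitions are in the diff below):

  struct acrn_vcpu_arch {
          /* x86-specific members moved in from struct acrn_vcpu */
          uint64_t reg_cached;
          uint64_t reg_updated;
          struct instr_emul_ctxt inst_ctxt;
          /* ... remaining arch members unchanged ... */
  };

  struct acrn_vcpu {
          /* moved out of struct acrn_vcpu_arch */
          uint64_t pending_req;
          /* sized by MAX_VCPU_EVENT_NUM instead of VCPU_EVENT_NUM */
          struct sched_event events[MAX_VCPU_EVENT_NUM];
          /* Architecture specific definitions for this VCPU */
          struct acrn_vcpu_arch arch;
          /* ... remaining common members unchanged ... */
  };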

Tracked-On: #8830
Signed-off-by: Yifan Liu <yifan1.liu@intel.com>
Reviewed-by: Fei Li <fei1.li@intel.com>
Acked-by: Wang Yu1 <yu1.wang@intel.com>
Author: Yifan Liu, 2025-08-27 01:45:12 +00:00
Committed by: acrnsi-robot
Parent: dca091fd90
Commit: cf91e66ac0
26 changed files with 212 additions and 147 deletions

View File

@@ -8,7 +8,7 @@
#include <errno.h>
#include <asm/vmx.h>
#include <asm/guest/guest_memory.h>
#include <asm/guest/vcpu.h>
#include <vcpu.h>
#include <asm/guest/vm.h>
#include <asm/guest/vmcs.h>
#include <asm/mmu.h>

View File

@@ -2258,7 +2258,7 @@ static int32_t local_decode_instruction(enum vm_cpu_mode cpu_mode,
static int32_t instr_check_di(struct acrn_vcpu *vcpu)
{
int32_t ret;
struct instr_emul_vie *vie = &vcpu->inst_ctxt.vie;
struct instr_emul_vie *vie = &vcpu->arch.inst_ctxt.vie;
uint64_t gva;
ret = get_gva_di_check(vcpu, vie, vie->addrsize, &gva);
@@ -2278,7 +2278,7 @@ static int32_t instr_check_gva(struct acrn_vcpu *vcpu, enum vm_cpu_mode cpu_mode
uint64_t base, segbase, idx, gva, gpa;
uint32_t err_code;
enum cpu_reg_name seg;
struct instr_emul_vie *vie = &vcpu->inst_ctxt.vie;
struct instr_emul_vie *vie = &vcpu->arch.inst_ctxt.vie;
base = 0UL;
if (vie->base_register != CPU_REG_LAST) {
@@ -2371,7 +2371,7 @@ int32_t decode_instruction(struct acrn_vcpu *vcpu, bool full_decode)
int32_t retval;
enum vm_cpu_mode cpu_mode;
emul_ctxt = &vcpu->inst_ctxt;
emul_ctxt = &vcpu->arch.inst_ctxt;
retval = vie_init(&emul_ctxt->vie, vcpu);
if (retval < 0) {
@@ -2428,7 +2428,7 @@ int32_t decode_instruction(struct acrn_vcpu *vcpu, bool full_decode)
int32_t emulate_instruction(struct acrn_vcpu *vcpu)
{
struct instr_emul_vie *vie = &vcpu->inst_ctxt.vie;
struct instr_emul_vie *vie = &vcpu->arch.inst_ctxt.vie;
int32_t error;
if (vie->decoded != 0U) {
@@ -2487,5 +2487,5 @@ int32_t emulate_instruction(struct acrn_vcpu *vcpu)
bool is_current_opcode_xchg(struct acrn_vcpu *vcpu)
{
return (vcpu->inst_ctxt.vie.op.op_type == VIE_OP_TYPE_XCHG);
return (vcpu->arch.inst_ctxt.vie.op.op_type == VIE_OP_TYPE_XCHG);
}

View File

@@ -5,7 +5,7 @@
*/
#include <types.h>
#include <asm/guest/vcpu.h>
#include <vcpu.h>
#include <asm/guest/vm.h>
#include <asm/guest/virq.h>
#include <event.h>

View File

@@ -9,7 +9,7 @@
#include <asm/mmu.h>
#include <asm/guest/virq.h>
#include <asm/guest/ept.h>
#include <asm/guest/vcpu.h>
#include <vcpu.h>
#include <asm/guest/vm.h>
#include <asm/guest/vmcs.h>
#include <asm/guest/nested.h>
@@ -1338,8 +1338,8 @@ static void set_vmcs01_guest_state(struct acrn_vcpu *vcpu)
*/
exec_vmwrite(VMX_GUEST_CR0, vmcs12->host_cr0);
exec_vmwrite(VMX_GUEST_CR4, vmcs12->host_cr4);
bitmap_clear_non_atomic(CPU_REG_CR0, &vcpu->reg_cached);
bitmap_clear_non_atomic(CPU_REG_CR4, &vcpu->reg_cached);
bitmap_clear_non_atomic(CPU_REG_CR0, &vcpu->arch.reg_cached);
bitmap_clear_non_atomic(CPU_REG_CR4, &vcpu->arch.reg_cached);
exec_vmwrite(VMX_GUEST_CR3, vmcs12->host_cr3);
exec_vmwrite(VMX_GUEST_DR7, DR7_INIT_VALUE);
@@ -1395,7 +1395,7 @@ static void set_vmcs01_guest_state(struct acrn_vcpu *vcpu)
}
/*
* For those registers that are managed by the vcpu->reg_updated flag,
* For those registers that are managed by the vcpu->arch.reg_updated flag,
* need to write with vcpu_set_xxx() so that vcpu_get_xxx() can get the
* correct values.
*/

View File

@@ -165,12 +165,12 @@ static void load_world_ctx(struct acrn_vcpu *vcpu, const struct ext_context *ext
uint32_t i;
/* mark to update on-demand run_context for efer/rflags/rsp/rip/cr0/cr4 */
bitmap_set_non_atomic(CPU_REG_EFER, &vcpu->reg_updated);
bitmap_set_non_atomic(CPU_REG_RFLAGS, &vcpu->reg_updated);
bitmap_set_non_atomic(CPU_REG_RSP, &vcpu->reg_updated);
bitmap_set_non_atomic(CPU_REG_RIP, &vcpu->reg_updated);
bitmap_set_non_atomic(CPU_REG_CR0, &vcpu->reg_updated);
bitmap_set_non_atomic(CPU_REG_CR4, &vcpu->reg_updated);
bitmap_set_non_atomic(CPU_REG_EFER, &vcpu->arch.reg_updated);
bitmap_set_non_atomic(CPU_REG_RFLAGS, &vcpu->arch.reg_updated);
bitmap_set_non_atomic(CPU_REG_RSP, &vcpu->arch.reg_updated);
bitmap_set_non_atomic(CPU_REG_RIP, &vcpu->arch.reg_updated);
bitmap_set_non_atomic(CPU_REG_CR0, &vcpu->arch.reg_updated);
bitmap_set_non_atomic(CPU_REG_CR4, &vcpu->arch.reg_updated);
/* VMCS Execution field */
exec_vmwrite64(VMX_TSC_OFFSET_FULL, ext_ctx->tsc_offset);

View File

@@ -14,7 +14,7 @@
#include <asm/board.h>
#include <asm/vm_config.h>
#include <asm/msr.h>
#include <asm/guest/vcpu.h>
#include <vcpu.h>
#include <asm/guest/vm.h>
#include <asm/guest/vcat.h>
#include <asm/per_cpu.h>

View File

@@ -6,7 +6,7 @@
#include <types.h>
#include <errno.h>
#include <asm/guest/vcpu.h>
#include <vcpu.h>
#include <asm/guest/virq.h>
#include <bits.h>
#include <asm/vmx.h>
@@ -24,6 +24,46 @@
#include <asm/irq.h>
#include <console.h>
bool is_vcpu_bsp(const struct acrn_vcpu *vcpu)
{
return (vcpu->vcpu_id == BSP_CPU_ID);
}
enum vm_cpu_mode get_vcpu_mode(const struct acrn_vcpu *vcpu)
{
return vcpu->arch.cpu_mode;
}
/* do not update Guest RIP for next VM Enter */
void vcpu_retain_rip(struct acrn_vcpu *vcpu)
{
(vcpu)->arch.inst_len = 0U;
}
struct acrn_vlapic *vcpu_vlapic(struct acrn_vcpu *vcpu)
{
return &(vcpu->arch.vlapic);
}
/**
* @brief Get pointer to PI description.
*
* @param[in] vcpu Target vCPU
*
* @return pointer to PI description
*
* @pre vcpu != NULL
*/
struct pi_desc *get_pi_desc(struct acrn_vcpu *vcpu)
{
return &(vcpu->arch.pid);
}
bool is_lapic_pt_enabled(struct acrn_vcpu *vcpu)
{
return vcpu->arch.lapic_pt_enabled;
}
uint64_t vcpu_get_gpreg(const struct acrn_vcpu *vcpu, uint32_t reg)
{
const struct run_context *ctx =
@@ -45,8 +85,8 @@ uint64_t vcpu_get_rip(struct acrn_vcpu *vcpu)
struct run_context *ctx =
&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
if (!bitmap_test(CPU_REG_RIP, &vcpu->reg_updated) &&
!bitmap_test_and_set_non_atomic(CPU_REG_RIP, &vcpu->reg_cached)) {
if (!bitmap_test(CPU_REG_RIP, &vcpu->arch.reg_updated) &&
!bitmap_test_and_set_non_atomic(CPU_REG_RIP, &vcpu->arch.reg_cached)) {
ctx->rip = exec_vmread(VMX_GUEST_RIP);
}
return ctx->rip;
@@ -55,7 +95,7 @@ uint64_t vcpu_get_rip(struct acrn_vcpu *vcpu)
void vcpu_set_rip(struct acrn_vcpu *vcpu, uint64_t val)
{
vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.rip = val;
bitmap_set_non_atomic(CPU_REG_RIP, &vcpu->reg_updated);
bitmap_set_non_atomic(CPU_REG_RIP, &vcpu->arch.reg_updated);
}
uint64_t vcpu_get_rsp(const struct acrn_vcpu *vcpu)
@@ -72,7 +112,7 @@ void vcpu_set_rsp(struct acrn_vcpu *vcpu, uint64_t val)
&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
ctx->cpu_regs.regs.rsp = val;
bitmap_set_non_atomic(CPU_REG_RSP, &vcpu->reg_updated);
bitmap_set_non_atomic(CPU_REG_RSP, &vcpu->arch.reg_updated);
}
uint64_t vcpu_get_efer(struct acrn_vcpu *vcpu)
@@ -97,7 +137,7 @@ void vcpu_set_efer(struct acrn_vcpu *vcpu, uint64_t val)
}
/* Write the new value to VMCS in either case */
bitmap_set_non_atomic(CPU_REG_EFER, &vcpu->reg_updated);
bitmap_set_non_atomic(CPU_REG_EFER, &vcpu->arch.reg_updated);
}
uint64_t vcpu_get_rflags(struct acrn_vcpu *vcpu)
@@ -105,8 +145,8 @@ uint64_t vcpu_get_rflags(struct acrn_vcpu *vcpu)
struct run_context *ctx =
&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
if (!bitmap_test(CPU_REG_RFLAGS, &vcpu->reg_updated) &&
!bitmap_test_and_set_non_atomic(CPU_REG_RFLAGS, &vcpu->reg_cached) && vcpu->launched) {
if (!bitmap_test(CPU_REG_RFLAGS, &vcpu->arch.reg_updated) &&
!bitmap_test_and_set_non_atomic(CPU_REG_RFLAGS, &vcpu->arch.reg_cached) && vcpu->launched) {
ctx->rflags = exec_vmread(VMX_GUEST_RFLAGS);
}
return ctx->rflags;
@@ -116,7 +156,7 @@ void vcpu_set_rflags(struct acrn_vcpu *vcpu, uint64_t val)
{
vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.rflags =
val;
bitmap_set_non_atomic(CPU_REG_RFLAGS, &vcpu->reg_updated);
bitmap_set_non_atomic(CPU_REG_RFLAGS, &vcpu->arch.reg_updated);
}
uint64_t vcpu_get_guest_msr(const struct acrn_vcpu *vcpu, uint32_t msr)
@@ -647,16 +687,16 @@ static void write_cached_registers(struct acrn_vcpu *vcpu)
struct run_context *ctx =
&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
if (bitmap_test_and_clear_non_atomic(CPU_REG_RIP, &vcpu->reg_updated)) {
if (bitmap_test_and_clear_non_atomic(CPU_REG_RIP, &vcpu->arch.reg_updated)) {
exec_vmwrite(VMX_GUEST_RIP, ctx->rip);
}
if (bitmap_test_and_clear_non_atomic(CPU_REG_RSP, &vcpu->reg_updated)) {
if (bitmap_test_and_clear_non_atomic(CPU_REG_RSP, &vcpu->arch.reg_updated)) {
exec_vmwrite(VMX_GUEST_RSP, ctx->cpu_regs.regs.rsp);
}
if (bitmap_test_and_clear_non_atomic(CPU_REG_EFER, &vcpu->reg_updated)) {
if (bitmap_test_and_clear_non_atomic(CPU_REG_EFER, &vcpu->arch.reg_updated)) {
exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, ctx->ia32_efer);
}
if (bitmap_test_and_clear_non_atomic(CPU_REG_RFLAGS, &vcpu->reg_updated)) {
if (bitmap_test_and_clear_non_atomic(CPU_REG_RFLAGS, &vcpu->arch.reg_updated)) {
exec_vmwrite(VMX_GUEST_RFLAGS, ctx->rflags);
}
@@ -665,11 +705,11 @@ static void write_cached_registers(struct acrn_vcpu *vcpu)
* switching. There should no other module request updating
* CR0/CR4 here.
*/
if (bitmap_test_and_clear_non_atomic(CPU_REG_CR0, &vcpu->reg_updated)) {
if (bitmap_test_and_clear_non_atomic(CPU_REG_CR0, &vcpu->arch.reg_updated)) {
vcpu_set_cr0(vcpu, ctx->cr0);
}
if (bitmap_test_and_clear_non_atomic(CPU_REG_CR4, &vcpu->reg_updated)) {
if (bitmap_test_and_clear_non_atomic(CPU_REG_CR4, &vcpu->arch.reg_updated)) {
vcpu_set_cr4(vcpu, ctx->cr4);
}
}
@@ -686,7 +726,7 @@ int32_t run_vcpu(struct acrn_vcpu *vcpu)
int32_t status = 0;
int32_t ibrs_type = get_ibrs_type();
if (vcpu->reg_updated != 0UL) {
if (vcpu->arch.reg_updated != 0UL) {
write_cached_registers(vcpu);
}
@@ -765,7 +805,7 @@ int32_t run_vcpu(struct acrn_vcpu *vcpu)
set_vcpu_mode(vcpu, cs_attr, ia32_efer, cr0);
}
vcpu->reg_cached = 0UL;
vcpu->arch.reg_cached = 0UL;
/* Obtain current VCPU instruction length */
vcpu->arch.inst_len = exec_vmread32(VMX_EXIT_INSTR_LEN);

View File

@@ -7,7 +7,7 @@
#include <types.h>
#include <errno.h>
#include <bits.h>
#include <asm/guest/vcpu.h>
#include <vcpu.h>
#include <asm/guest/vm.h>
#include <asm/cpuid.h>
#include <asm/cpufeatures.h>

View File

@@ -7,7 +7,7 @@
#include <types.h>
#include <logmsg.h>
#include <asm/mmu.h>
#include <asm/guest/vcpu.h>
#include <vcpu.h>
#include <asm/guest/vm.h>
#include <asm/guest/vmexit.h>
#include <asm/guest/ept.h>

View File

@@ -11,7 +11,7 @@
#include <asm/lapic.h>
#include <asm/mmu.h>
#include <asm/vmx.h>
#include <asm/guest/vcpu.h>
#include <vcpu.h>
#include <asm/guest/vmcs.h>
#include <asm/guest/vm.h>
#include <asm/guest/lock_instr_emul.h>
@@ -130,7 +130,7 @@ static inline bool is_nmi_injectable(void)
}
void vcpu_make_request(struct acrn_vcpu *vcpu, uint16_t eventid)
{
bitmap_set(eventid, &vcpu->arch.pending_req);
bitmap_set(eventid, &vcpu->pending_req);
kick_vcpu(vcpu);
}
@@ -363,7 +363,7 @@ int32_t acrn_handle_pending_request(struct acrn_vcpu *vcpu)
bool injected = false;
int32_t ret = 0;
struct acrn_vcpu_arch *arch = &vcpu->arch;
uint64_t *pending_req_bits = &arch->pending_req;
uint64_t *pending_req_bits = &vcpu->pending_req;
if (*pending_req_bits != 0UL) {
/* make sure ACRN_REQUEST_INIT_VMCS handler as the first one */

View File

@@ -11,7 +11,7 @@
#include <bits.h>
#include <asm/guest/virq.h>
#include <asm/mmu.h>
#include <asm/guest/vcpu.h>
#include <vcpu.h>
#include <asm/guest/vm.h>
#include <asm/vmx.h>
#include <asm/vtd.h>
@@ -318,7 +318,7 @@ static void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t value)
exec_vmwrite(VMX_CR0_READ_SHADOW, effective_cr0);
/* clear read cache, next time read should from VMCS */
bitmap_clear_non_atomic(CPU_REG_CR0, &vcpu->reg_cached);
bitmap_clear_non_atomic(CPU_REG_CR0, &vcpu->arch.reg_cached);
pr_dbg("VMM: Try to write %016lx, allow to write 0x%016lx to CR0", effective_cr0, tmp);
}
@@ -420,7 +420,7 @@ static void vmx_write_cr4(struct acrn_vcpu *vcpu, uint64_t cr4)
exec_vmwrite(VMX_CR4_READ_SHADOW, cr4);
/* clear read cache, next time read should from VMCS */
bitmap_clear_non_atomic(CPU_REG_CR4, &vcpu->reg_cached);
bitmap_clear_non_atomic(CPU_REG_CR4, &vcpu->arch.reg_cached);
pr_dbg("VMM: Try to write %016lx, allow to write 0x%016lx to CR4", cr4, tmp);
}
@@ -521,7 +521,7 @@ uint64_t vcpu_get_cr0(struct acrn_vcpu *vcpu)
{
struct run_context *ctx = &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
if (bitmap_test_and_set_non_atomic(CPU_REG_CR0, &vcpu->reg_cached) == 0) {
if (bitmap_test_and_set_non_atomic(CPU_REG_CR0, &vcpu->arch.reg_cached) == 0) {
ctx->cr0 = (exec_vmread(VMX_CR0_READ_SHADOW) & ~cr0_passthru_mask) |
(exec_vmread(VMX_GUEST_CR0) & cr0_passthru_mask);
}
@@ -549,7 +549,7 @@ uint64_t vcpu_get_cr4(struct acrn_vcpu *vcpu)
{
struct run_context *ctx = &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
if (bitmap_test_and_set_non_atomic(CPU_REG_CR4, &vcpu->reg_cached) == 0) {
if (bitmap_test_and_set_non_atomic(CPU_REG_CR4, &vcpu->arch.reg_cached) == 0) {
ctx->cr4 = (exec_vmread(VMX_CR4_READ_SHADOW) & ~cr4_passthru_mask) |
(exec_vmread(VMX_GUEST_CR4) & cr4_passthru_mask);
}

View File

@@ -511,7 +511,7 @@ static void apicv_advanced_accept_intr(struct acrn_vlapic *vlapic, uint32_t vect
* send PI notification to vCPU and hardware will
* sync PIR to vIRR automatically.
*/
bitmap_set(ACRN_REQUEST_EVENT, &vcpu->arch.pending_req);
bitmap_set(ACRN_REQUEST_EVENT, &vcpu->pending_req);
if (get_pcpu_id() != pcpuid_from_vcpu(vcpu)) {
apicv_trigger_pi_anv(pcpuid_from_vcpu(vcpu), (uint32_t)vcpu->arch.pid.control.bits.nv);
@@ -2326,7 +2326,7 @@ static bool apicv_basic_has_pending_delivery_intr(struct acrn_vcpu *vcpu)
vcpu_make_request(vcpu, ACRN_REQUEST_EVENT);
}
return vcpu->arch.pending_req != 0UL;
return vcpu->pending_req != 0UL;
}
static bool apicv_advanced_has_pending_delivery_intr(__unused struct acrn_vcpu *vcpu)

View File

@@ -8,6 +8,7 @@
#include <errno.h>
#include <spinlock.h>
#include <vm.h>
#include <vcpu.h>
#include <asm/guest/virq.h>
#include <asm/guest/optee.h>
#include <acrn_hv_defs.h>

View File

@@ -8,7 +8,7 @@
#include <types.h>
#include <asm/guest/vmcs.h>
#include <asm/guest/vcpu.h>
#include <vcpu.h>
#include <asm/guest/vm.h>
#include <asm/vmx.h>
#include <asm/gdt.h>

View File

@@ -9,7 +9,7 @@
#include <asm/vmx.h>
#include <asm/guest/virq.h>
#include <asm/mmu.h>
#include <asm/guest/vcpu.h>
#include <vcpu.h>
#include <asm/guest/vm.h>
#include <asm/guest/vmexit.h>
#include <asm/guest/vm_reset.h>
@@ -349,7 +349,7 @@ static int32_t pause_vmexit_handler(__unused struct acrn_vcpu *vcpu)
static int32_t hlt_vmexit_handler(struct acrn_vcpu *vcpu)
{
if ((vcpu->arch.pending_req == 0UL) && (!vlapic_has_pending_intr(vcpu))) {
if ((vcpu->pending_req == 0UL) && (!vlapic_has_pending_intr(vcpu))) {
wait_event(&vcpu->events[VCPU_EVENT_VIRTUAL_INTERRUPT]);
}
return 0;

View File

@@ -9,7 +9,7 @@
#include <pgtable.h>
#include <asm/msr.h>
#include <asm/cpuid.h>
#include <asm/guest/vcpu.h>
#include <vcpu.h>
#include <asm/guest/virq.h>
#include <asm/guest/vm.h>
#include <asm/vmx.h>

View File

@@ -8,7 +8,7 @@
#include <asm/msr.h>
#include <asm/pgtable.h>
#include <asm/guest/ept.h>
#include <asm/guest/vcpu.h>
#include <vcpu.h>
#include <asm/guest/vm.h>
#include <logmsg.h>

View File

@@ -8,7 +8,7 @@
#include <errno.h>
#include <atomic.h>
#include <io_req.h>
#include <asm/guest/vcpu.h>
#include <vcpu.h>
#include <asm/guest/vm.h>
#include <asm/guest/instr_emul.h>
#include <asm/guest/vmexit.h>

View File

@@ -7,7 +7,7 @@
#include <asm/irq.h>
#include <asm/vmx.h>
#include <asm/guest/vcpu.h>
#include <vcpu.h>
#include <asm/guest/virq.h>
#include <cpu.h>

View File

@@ -7,7 +7,7 @@
#include <types.h>
#include <asm/per_cpu.h>
#include <asm/mmu.h>
#include <asm/guest/vcpu.h>
#include <vcpu.h>
#include <asm/vmx.h>
#include <asm/guest/vm.h>
#include <asm/init.h>

View File

@@ -10,8 +10,8 @@
* @brief public APIs for vcpu operations
*/
#ifndef VCPU_H
#define VCPU_H
#ifndef X86_VCPU_H
#define X86_VCPU_H
#ifndef ASSEMBLER
@@ -140,19 +140,6 @@
#define LDTR_AR (0x0082U) /* LDT, type must be 2, refer to SDM Vol3 26.3.1.2 */
#define TR_AR (0x008bU) /* TSS (busy), refer to SDM Vol3 26.3.1.2 */
#define foreach_vcpu(idx, vm, vcpu) \
for ((idx) = 0U, (vcpu) = &((vm)->hw.vcpu_array[(idx)]); \
(idx) < (vm)->hw.created_vcpus; \
(idx)++, (vcpu) = &((vm)->hw.vcpu_array[(idx)])) \
if ((vcpu)->state != VCPU_OFFLINE)
enum vcpu_state {
VCPU_OFFLINE = 0U,
VCPU_INIT,
VCPU_RUNNING,
VCPU_ZOMBIE,
};
enum vm_cpu_mode {
CPU_MODE_REAL,
CPU_MODE_PROTECTED,
@@ -278,13 +265,15 @@ struct acrn_vcpu_arch {
uint32_t proc_vm_exec_ctrls;
uint32_t inst_len;
uint64_t reg_cached;
uint64_t reg_updated;
struct instr_emul_ctxt inst_ctxt;
/* Information related to secondary / AP VCPU start-up */
enum vm_cpu_mode cpu_mode;
uint8_t nr_sipi;
/* interrupt injection information */
uint64_t pending_req;
/* List of MSRS to be stored and loaded on VM exits or VM entries */
struct msr_store_area msr_area;
@@ -302,75 +291,16 @@ struct acrn_vcpu_arch {
uint64_t iwkey_copy_status;
} __aligned(PAGE_SIZE);
struct acrn_vm;
struct acrn_vcpu {
uint8_t stack[CONFIG_STACK_SIZE] __aligned(16);
/* Architecture specific definitions for this VCPU */
struct acrn_vcpu_arch arch;
uint16_t vcpu_id; /* virtual identifier for VCPU */
struct acrn_vm *vm; /* Reference to the VM this VCPU belongs to */
volatile enum vcpu_state state; /* State of this VCPU */
struct thread_object thread_obj;
bool launched; /* Whether the vcpu is launched on target pcpu */
struct instr_emul_ctxt inst_ctxt;
struct io_request req; /* used by io/ept emulation */
uint64_t reg_cached;
uint64_t reg_updated;
struct sched_event events[VCPU_EVENT_NUM];
} __aligned(PAGE_SIZE);
struct vcpu_dump {
struct acrn_vcpu *vcpu;
char *str;
uint32_t str_max;
};
struct guest_mem_dump {
struct acrn_vcpu *vcpu;
uint64_t gva;
uint64_t len;
};
static inline bool is_vcpu_bsp(const struct acrn_vcpu *vcpu)
{
return (vcpu->vcpu_id == BSP_CPU_ID);
}
static inline enum vm_cpu_mode get_vcpu_mode(const struct acrn_vcpu *vcpu)
{
return vcpu->arch.cpu_mode;
}
struct acrn_vcpu;
enum vcpu_state;
bool is_vcpu_bsp(const struct acrn_vcpu *vcpu);
enum vm_cpu_mode get_vcpu_mode(const struct acrn_vcpu *vcpu);
/* do not update Guest RIP for next VM Enter */
static inline void vcpu_retain_rip(struct acrn_vcpu *vcpu)
{
(vcpu)->arch.inst_len = 0U;
}
void vcpu_retain_rip(struct acrn_vcpu *vcpu);
struct acrn_vlapic *vcpu_vlapic(struct acrn_vcpu *vcpu);
static inline struct acrn_vlapic *vcpu_vlapic(struct acrn_vcpu *vcpu)
{
return &(vcpu->arch.vlapic);
}
/**
* @brief Get pointer to PI description.
*
* @param[in] vcpu Target vCPU
*
* @return pointer to PI description
*
* @pre vcpu != NULL
*/
static inline struct pi_desc *get_pi_desc(struct acrn_vcpu *vcpu)
{
return &(vcpu->arch.pid);
}
struct pi_desc *get_pi_desc(struct acrn_vcpu *vcpu);
uint16_t pcpuid_from_vcpu(const struct acrn_vcpu *vcpu);
void vcpu_thread(struct thread_object *obj);
@@ -720,10 +650,7 @@ uint64_t vcpumask2pcpumask(struct acrn_vm *vm, uint64_t vdmask);
* @return true, if vCPU LAPIC is in x2APIC mode and VM, vCPU belongs to, is configured for
* LAPIC Pass-through
*/
static inline bool is_lapic_pt_enabled(struct acrn_vcpu *vcpu)
{
return vcpu->arch.lapic_pt_enabled;
}
bool is_lapic_pt_enabled(struct acrn_vcpu *vcpu);
/**
* @brief handle posted interrupts

View File

@@ -16,7 +16,7 @@
#include <bits.h>
#include <spinlock.h>
#include <asm/pgtable.h>
#include <asm/guest/vcpu.h>
#include <vcpu.h>
#include <vioapic.h>
#include <vpic.h>
#include <asm/guest/vmx_io.h>

View File

@@ -12,7 +12,7 @@
#ifndef ASSEMBLER
#include <types.h>
#include <asm/guest/vcpu.h>
#include <vcpu.h>
#define VMX_VMENTRY_FAIL 0x80000000U

View File

@@ -0,0 +1,97 @@
/*
* Copyright (C) 2018-2022 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**
* @file vcpu.h
*
* @brief public APIs for vcpu operations
*/
#ifndef VCPU_H
#define VCPU_H
#ifndef ASSEMBLER
#include <types.h>
#include <asm/page.h>
#include <schedule.h>
#include <event.h>
#include <io_req.h>
#include <asm/guest/vcpu.h>
/**
* @brief vcpu
*
* @defgroup acrn_vcpu ACRN vcpu
* @{
*/
/*
* VCPU related APIs
*/
#define MAX_VCPU_EVENT_NUM 16
#define foreach_vcpu(idx, vm, vcpu) \
for ((idx) = 0U, (vcpu) = &((vm)->hw.vcpu_array[(idx)]); \
(idx) < (vm)->hw.created_vcpus; \
(idx)++, (vcpu) = &((vm)->hw.vcpu_array[(idx)])) \
if ((vcpu)->state != VCPU_OFFLINE)
enum vcpu_state {
VCPU_OFFLINE = 0U,
VCPU_INIT,
VCPU_RUNNING,
VCPU_ZOMBIE,
};
struct acrn_vm;
struct acrn_vcpu {
uint8_t stack[CONFIG_STACK_SIZE] __aligned(16);
uint16_t vcpu_id; /* virtual identifier for VCPU */
struct acrn_vm *vm; /* Reference to the VM this VCPU belongs to */
volatile enum vcpu_state state; /* State of this VCPU */
struct thread_object thread_obj;
bool launched; /* Whether the vcpu is launched on target pcpu */
struct io_request req; /* used by io/ept emulation */
/* pending requests bitmask. Each bit represents one arch-specific request */
uint64_t pending_req;
/* The first half (8) of the events are used for platform-independent
* events, and the latter half for platform-dependent events
*/
struct sched_event events[MAX_VCPU_EVENT_NUM];
/* Architecture specific definitions for this VCPU */
struct acrn_vcpu_arch arch;
} __aligned(PAGE_SIZE);
struct vcpu_dump {
struct acrn_vcpu *vcpu;
char *str;
uint32_t str_max;
};
struct guest_mem_dump {
struct acrn_vcpu *vcpu;
uint64_t gva;
uint64_t len;
};
/**
* @}
*/
/* End of acrn_vcpu */
#endif /* ASSEMBLER */
#endif /* VCPU_H */

View File

@@ -9,7 +9,7 @@
#ifdef PROFILING_ON
#include <asm/guest/vcpu.h>
#include <vcpu.h>
#include <asm/vm_config.h>
#define MAX_MSR_LIST_NUM 15U

View File

@@ -6,7 +6,7 @@
#include <util.h>
#include <acrn_common.h>
#include <asm/guest/vcpu.h>
#include <vcpu.h>
#include <asm/mmu.h>
#include <asm/guest/trusty.h>
#include <asm/vtd.h>