HV: treewide: rename vcpu data structure

For the data structure type "struct vcpu", the type name is identical
to the variable name in the same scope. This is a MISRA C violation.

Naming convention rule: if a data structure type is used by multiple
modules, its corresponding logical resource is exposed to external
components (such as SOS or UOS), and its name is generic (such as
vcpu or vm), then its name needs the prefix "acrn_".

The following update is made:
struct vcpu *vcpu --> struct acrn_vcpu *vcpu

Tracked-On: #861

Signed-off-by: Xiangyang Wu <xiangyang.wu@linux.intel.com>
Author: Xiangyang Wu <xiangyang.wu@linux.intel.com>
Date: 2018-11-05 13:25:25 +08:00
Committed by: lijinxia
Parent: 969f7cf071
Commit: fa26a16645
49 changed files with 355 additions and 355 deletions
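As background, a minimal sketch of the identifier reuse that motivates the
rename (a hypothetical example, not taken from the ACRN sources): MISRA C
flags reusing one identifier for both a struct tag and an object visible in
the same scope.

#include <stdint.h>

/* Before: the tag "vcpu" and the parameter name "vcpu" collide. */
struct vcpu {
	uint16_t vcpu_id;
};

static void handle_before(struct vcpu *vcpu)	/* tag reused as a name */
{
	struct vcpu copy = *vcpu;
	(void)copy;
}

/* After: the tag carries the "acrn_" prefix, leaving the short name
 * "vcpu" free for variables and parameters. */
struct acrn_vcpu {
	uint16_t vcpu_id;
};

static void handle_after(struct acrn_vcpu *vcpu)
{
	struct acrn_vcpu copy = *vcpu;
	(void)copy;
}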


@ -442,7 +442,7 @@ static void ptdev_intr_handle_irq(struct vm *vm,
void ptdev_softirq(uint16_t pcpu_id)
{
struct vcpu *vcpu = (struct vcpu *)per_cpu(vcpu, pcpu_id);
struct acrn_vcpu *vcpu = (struct acrn_vcpu *)per_cpu(vcpu, pcpu_id);
struct vm *vm = vcpu->vm;
while (1) {


@ -6,7 +6,7 @@
#include <hypervisor.h>
static inline struct vcpuid_entry *find_vcpuid_entry(const struct vcpu *vcpu,
static inline struct vcpuid_entry *find_vcpuid_entry(const struct acrn_vcpu *vcpu,
uint32_t leaf_arg, uint32_t subleaf)
{
uint32_t i = 0U, nr, half;
@ -293,7 +293,7 @@ int set_vcpuid_entries(struct vm *vm)
return 0;
}
void guest_cpuid(struct vcpu *vcpu,
void guest_cpuid(struct acrn_vcpu *vcpu,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx)
{


@ -28,7 +28,7 @@ uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
uint64_t hpa = INVALID_HPA;
uint64_t *pgentry, pg_size = 0UL;
void *eptp;
struct vcpu *vcpu = vcpu_from_pid(vm, get_cpu_id());
struct acrn_vcpu *vcpu = vcpu_from_pid(vm, get_cpu_id());
if ((vcpu != NULL) && (vcpu->arch_vcpu.cur_context == SECURE_WORLD)) {
eptp = vm->arch_vm.sworld_eptp;
@ -71,7 +71,7 @@ uint64_t vm0_hpa2gpa(uint64_t hpa)
return hpa;
}
int ept_violation_vmexit_handler(struct vcpu *vcpu)
int ept_violation_vmexit_handler(struct acrn_vcpu *vcpu)
{
int status = -EINVAL, ret;
uint64_t exit_qual;
@ -158,7 +158,7 @@ out:
return status;
}
int ept_misconfig_vmexit_handler(__unused struct vcpu *vcpu)
int ept_misconfig_vmexit_handler(__unused struct acrn_vcpu *vcpu)
{
int status;
@ -182,7 +182,7 @@ void ept_mr_add(struct vm *vm, uint64_t *pml4_page,
uint64_t hpa, uint64_t gpa, uint64_t size, uint64_t prot_orig)
{
uint16_t i;
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
uint64_t prot = prot_orig;
dev_dbg(ACRN_DBG_EPT, "%s, vm[%d] hpa: 0x%016llx gpa: 0x%016llx size: 0x%016llx prot: 0x%016x\n",
@ -207,7 +207,7 @@ void ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
uint64_t gpa, uint64_t size,
uint64_t prot_set, uint64_t prot_clr)
{
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
uint16_t i;
dev_dbg(ACRN_DBG_EPT, "%s,vm[%d] gpa 0x%llx size 0x%llx\n", __func__, vm->vm_id, gpa, size);
@ -223,7 +223,7 @@ void ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
*/
void ept_mr_del(struct vm *vm, uint64_t *pml4_page, uint64_t gpa, uint64_t size)
{
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
uint16_t i;
dev_dbg(ACRN_DBG_EPT, "%s,vm[%d] gpa 0x%llx size 0x%llx\n", __func__, vm->vm_id, gpa, size);


@ -36,7 +36,7 @@ uint64_t vcpumask2pcpumask(struct vm *vm, uint64_t vdmask)
{
uint16_t vcpu_id;
uint64_t dmask = 0UL;
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
for (vcpu_id = 0U; vcpu_id < vm->hw.created_vcpus; vcpu_id++) {
if (vdmask & (1U << vcpu_id)) {
@ -48,7 +48,7 @@ uint64_t vcpumask2pcpumask(struct vm *vm, uint64_t vdmask)
return dmask;
}
enum vm_paging_mode get_vcpu_paging_mode(struct vcpu *vcpu)
enum vm_paging_mode get_vcpu_paging_mode(struct acrn_vcpu *vcpu)
{
enum vm_cpu_mode cpu_mode;
@ -72,7 +72,7 @@ enum vm_paging_mode get_vcpu_paging_mode(struct vcpu *vcpu)
/* TODO: Add code to check for Revserved bits, SMAP and PKE when do translation
* during page walk */
static int local_gva2gpa_common(struct vcpu *vcpu, const struct page_walk_info *pw_info,
static int local_gva2gpa_common(struct acrn_vcpu *vcpu, const struct page_walk_info *pw_info,
uint64_t gva, uint64_t *gpa, uint32_t *err_code)
{
uint32_t i;
@ -219,7 +219,7 @@ out:
return ret;
}
static int local_gva2gpa_pae(struct vcpu *vcpu, struct page_walk_info *pw_info,
static int local_gva2gpa_pae(struct acrn_vcpu *vcpu, struct page_walk_info *pw_info,
uint64_t gva, uint64_t *gpa, uint32_t *err_code)
{
int index;
@ -269,7 +269,7 @@ out:
* - Return -EFAULT for paging fault, and refer to err_code for paging fault
* error code.
*/
int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa,
int gva2gpa(struct acrn_vcpu *vcpu, uint64_t gva, uint64_t *gpa,
uint32_t *err_code)
{
enum vm_paging_mode pm = get_vcpu_paging_mode(vcpu);
@ -385,7 +385,7 @@ static inline int copy_gpa(struct vm *vm, void *h_ptr_arg, uint64_t gpa_arg,
/*
* @pre vcpu != NULL && err_code != NULL
*/
static inline int copy_gva(struct vcpu *vcpu, void *h_ptr_arg, uint64_t gva_arg,
static inline int copy_gva(struct acrn_vcpu *vcpu, void *h_ptr_arg, uint64_t gva_arg,
uint32_t size_arg, uint32_t *err_code, uint64_t *fault_addr,
bool cp_from_vm)
{
@ -443,13 +443,13 @@ int copy_to_gpa(struct vm *vm, void *h_ptr, uint64_t gpa, uint32_t size)
return copy_gpa(vm, h_ptr, gpa, size, 0);
}
int copy_from_gva(struct vcpu *vcpu, void *h_ptr, uint64_t gva,
int copy_from_gva(struct acrn_vcpu *vcpu, void *h_ptr, uint64_t gva,
uint32_t size, uint32_t *err_code, uint64_t *fault_addr)
{
return copy_gva(vcpu, h_ptr, gva, size, err_code, fault_addr, 1);
}
int copy_to_gva(struct vcpu *vcpu, void *h_ptr, uint64_t gva,
int copy_to_gva(struct acrn_vcpu *vcpu, void *h_ptr, uint64_t gva,
uint32_t size, uint32_t *err_code, uint64_t *fault_addr)
{
return copy_gva(vcpu, h_ptr, gva, size, err_code, fault_addr, 0);


@ -322,7 +322,7 @@ static uint32_t get_vmcs_field(enum cpu_reg_name ident)
* @pre ((reg <= CPU_REG_LAST) && (reg >= CPU_REG_FIRST))
* @pre ((reg != CPU_REG_CR2) && (reg != CPU_REG_IDTR) && (reg != CPU_REG_GDTR))
*/
static uint64_t vm_get_register(const struct vcpu *vcpu, enum cpu_reg_name reg)
static uint64_t vm_get_register(const struct acrn_vcpu *vcpu, enum cpu_reg_name reg)
{
uint64_t reg_val = 0UL;
@ -349,7 +349,7 @@ static uint64_t vm_get_register(const struct vcpu *vcpu, enum cpu_reg_name reg)
* @pre ((reg <= CPU_REG_LAST) && (reg >= CPU_REG_FIRST))
* @pre ((reg != CPU_REG_CR2) && (reg != CPU_REG_IDTR) && (reg != CPU_REG_GDTR))
*/
static void vm_set_register(struct vcpu *vcpu, enum cpu_reg_name reg,
static void vm_set_register(struct acrn_vcpu *vcpu, enum cpu_reg_name reg,
uint64_t val)
{
@ -387,7 +387,7 @@ static void vm_get_seg_desc(enum cpu_reg_name seg, struct seg_desc *desc)
desc->access = exec_vmread32(tdesc.access_field);
}
static void get_guest_paging_info(struct vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt,
static void get_guest_paging_info(struct acrn_vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt,
uint32_t csar)
{
uint8_t cpl;
@ -492,7 +492,7 @@ static int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
return 0;
}
static int mmio_read(const struct vcpu *vcpu, uint64_t *rval)
static int mmio_read(const struct acrn_vcpu *vcpu, uint64_t *rval)
{
if (vcpu == NULL) {
return -EINVAL;
@ -502,7 +502,7 @@ static int mmio_read(const struct vcpu *vcpu, uint64_t *rval)
return 0;
}
static int mmio_write(struct vcpu *vcpu, uint64_t wval)
static int mmio_write(struct acrn_vcpu *vcpu, uint64_t wval)
{
if (vcpu == NULL) {
return -EINVAL;
@ -538,7 +538,7 @@ static void vie_calc_bytereg(const struct instr_emul_vie *vie,
}
}
static uint8_t vie_read_bytereg(const struct vcpu *vcpu, const struct instr_emul_vie *vie)
static uint8_t vie_read_bytereg(const struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
int lhbr;
uint64_t val;
@ -561,7 +561,7 @@ static uint8_t vie_read_bytereg(const struct vcpu *vcpu, const struct instr_emul
return reg_val;
}
static void vie_write_bytereg(struct vcpu *vcpu, const struct instr_emul_vie *vie,
static void vie_write_bytereg(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie,
uint8_t byte)
{
uint64_t origval, val, mask;
@ -591,7 +591,7 @@ static void vie_write_bytereg(struct vcpu *vcpu, const struct instr_emul_vie *vi
* @pre ((reg <= CPU_REG_LAST) && (reg >= CPU_REG_FIRST))
* @pre ((reg != CPU_REG_CR2) && (reg != CPU_REG_IDTR) && (reg != CPU_REG_GDTR))
*/
static void vie_update_register(struct vcpu *vcpu, enum cpu_reg_name reg,
static void vie_update_register(struct acrn_vcpu *vcpu, enum cpu_reg_name reg,
uint64_t val_arg, uint8_t size)
{
uint64_t origval;
@ -616,7 +616,7 @@ static void vie_update_register(struct vcpu *vcpu, enum cpu_reg_name reg,
#define RFLAGS_STATUS_BITS (PSL_C | PSL_PF | PSL_AF | PSL_Z | PSL_N | PSL_V)
static void vie_update_rflags(struct vcpu *vcpu, uint64_t rflags2, uint64_t psl)
static void vie_update_rflags(struct acrn_vcpu *vcpu, uint64_t rflags2, uint64_t psl)
{
uint8_t size;
uint64_t rflags;
@ -664,7 +664,7 @@ static uint64_t getcc(uint8_t opsize, uint64_t x, uint64_t y)
}
}
static int emulate_mov(struct vcpu *vcpu, const struct instr_emul_vie *vie)
static int emulate_mov(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
int error;
uint8_t size;
@ -780,7 +780,7 @@ static int emulate_mov(struct vcpu *vcpu, const struct instr_emul_vie *vie)
return error;
}
static int emulate_movx(struct vcpu *vcpu, const struct instr_emul_vie *vie)
static int emulate_movx(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
int error;
uint8_t size;
@ -883,7 +883,7 @@ static int emulate_movx(struct vcpu *vcpu, const struct instr_emul_vie *vie)
*
* It's only used by MOVS/STO
*/
static void get_gva_si_nocheck(const struct vcpu *vcpu, uint8_t addrsize,
static void get_gva_si_nocheck(const struct acrn_vcpu *vcpu, uint8_t addrsize,
enum cpu_reg_name seg, uint64_t *gva)
{
uint64_t val;
@ -907,7 +907,7 @@ static void get_gva_si_nocheck(const struct vcpu *vcpu, uint8_t addrsize,
*
* It's only used by MOVS/STO
*/
static int get_gva_di_check(struct vcpu *vcpu, struct instr_emul_vie *vie,
static int get_gva_di_check(struct acrn_vcpu *vcpu, struct instr_emul_vie *vie,
uint8_t addrsize, uint64_t *gva)
{
int ret;
@ -980,7 +980,7 @@ exception_inject:
* For MOVs instruction, we always check RDI during instruction decoding phase.
* And access RSI without any check during instruction emulation phase.
*/
static int emulate_movs(struct vcpu *vcpu, const struct instr_emul_vie *vie)
static int emulate_movs(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
uint64_t src_gva, gpa, val = 0UL;
uint64_t *dst_hva, *src_hva;
@ -1067,7 +1067,7 @@ done:
return error;
}
static int emulate_stos(struct vcpu *vcpu, const struct instr_emul_vie *vie)
static int emulate_stos(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
int error, repeat;
uint8_t opsize = vie->opsize;
@ -1121,7 +1121,7 @@ static int emulate_stos(struct vcpu *vcpu, const struct instr_emul_vie *vie)
return 0;
}
static int emulate_test(struct vcpu *vcpu, const struct instr_emul_vie *vie)
static int emulate_test(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
int error;
uint8_t size;
@ -1187,7 +1187,7 @@ static int emulate_test(struct vcpu *vcpu, const struct instr_emul_vie *vie)
return error;
}
static int emulate_and(struct vcpu *vcpu, const struct instr_emul_vie *vie)
static int emulate_and(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
int error;
uint8_t size;
@ -1275,7 +1275,7 @@ static int emulate_and(struct vcpu *vcpu, const struct instr_emul_vie *vie)
return error;
}
static int emulate_or(struct vcpu *vcpu, const struct instr_emul_vie *vie)
static int emulate_or(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
int error;
uint8_t size;
@ -1366,7 +1366,7 @@ static int emulate_or(struct vcpu *vcpu, const struct instr_emul_vie *vie)
return error;
}
static int emulate_cmp(struct vcpu *vcpu, const struct instr_emul_vie *vie)
static int emulate_cmp(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
int error;
uint8_t size;
@ -1458,7 +1458,7 @@ static int emulate_cmp(struct vcpu *vcpu, const struct instr_emul_vie *vie)
return error;
}
static int emulate_sub(struct vcpu *vcpu, const struct instr_emul_vie *vie)
static int emulate_sub(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
int error;
uint8_t size;
@ -1512,7 +1512,7 @@ static int emulate_sub(struct vcpu *vcpu, const struct instr_emul_vie *vie)
return error;
}
static int emulate_group1(struct vcpu *vcpu, const struct instr_emul_vie *vie)
static int emulate_group1(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
int error;
@ -1534,7 +1534,7 @@ static int emulate_group1(struct vcpu *vcpu, const struct instr_emul_vie *vie)
return error;
}
static int emulate_bittest(struct vcpu *vcpu, const struct instr_emul_vie *vie)
static int emulate_bittest(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
{
uint64_t val, rflags, bitmask;
int error;
@ -1580,7 +1580,7 @@ static int emulate_bittest(struct vcpu *vcpu, const struct instr_emul_vie *vie)
static int vmm_emulate_instruction(struct instr_emul_ctxt *ctxt)
{
struct instr_emul_vie *vie = &ctxt->vie;
struct vcpu *vcpu = ctxt->vcpu;
struct acrn_vcpu *vcpu = ctxt->vcpu;
int error;
if (vie->decoded == 0U) {
@ -1628,7 +1628,7 @@ static int vmm_emulate_instruction(struct instr_emul_ctxt *ctxt)
return error;
}
static int vie_init(struct instr_emul_vie *vie, struct vcpu *vcpu)
static int vie_init(struct instr_emul_vie *vie, struct acrn_vcpu *vcpu)
{
uint64_t guest_rip_gva = vcpu_get_rip(vcpu);
uint32_t inst_len = vcpu->arch_vcpu.inst_len;
@ -2182,7 +2182,7 @@ static int local_decode_instruction(enum vm_cpu_mode cpu_mode,
}
/* for instruction MOVS/STO, check the gva gotten from DI/SI. */
static int instr_check_di(struct vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt)
static int instr_check_di(struct acrn_vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt)
{
int ret;
struct instr_emul_vie *vie = &emul_ctxt->vie;
@ -2197,7 +2197,7 @@ static int instr_check_di(struct vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt)
return 0;
}
static int instr_check_gva(struct vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt,
static int instr_check_gva(struct acrn_vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt,
enum vm_cpu_mode cpu_mode)
{
int ret;
@ -2283,7 +2283,7 @@ static int instr_check_gva(struct vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt,
return 0;
}
int decode_instruction(struct vcpu *vcpu)
int decode_instruction(struct acrn_vcpu *vcpu)
{
struct instr_emul_ctxt *emul_ctxt;
uint32_t csar;
@ -2343,7 +2343,7 @@ int decode_instruction(struct vcpu *vcpu)
return (int)(emul_ctxt->vie.opsize);
}
int emulate_instruction(const struct vcpu *vcpu)
int emulate_instruction(const struct acrn_vcpu *vcpu)
{
struct instr_emul_ctxt *ctxt = &per_cpu(g_inst_ctxt, vcpu->pcpu_id);


@ -190,10 +190,10 @@ struct vm_guest_paging {
struct instr_emul_ctxt {
struct instr_emul_vie vie;
struct vm_guest_paging paging;
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
};
int emulate_instruction(const struct vcpu *vcpu);
int decode_instruction(struct vcpu *vcpu);
int emulate_instruction(const struct acrn_vcpu *vcpu);
int decode_instruction(struct acrn_vcpu *vcpu);
#endif


@ -32,7 +32,7 @@ static inline size_t get_ucode_data_size(const struct ucode_header *uhdr)
return ((uhdr->data_size != 0U) ? uhdr->data_size : 2000U);
}
void acrn_update_ucode(struct vcpu *vcpu, uint64_t v)
void acrn_update_ucode(struct acrn_vcpu *vcpu, uint64_t v)
{
uint64_t gva, fault_addr;
struct ucode_header uhdr;


@ -10,7 +10,7 @@
vm_sw_loader_t vm_sw_loader;
inline uint64_t vcpu_get_gpreg(const struct vcpu *vcpu, uint32_t reg)
inline uint64_t vcpu_get_gpreg(const struct acrn_vcpu *vcpu, uint32_t reg)
{
const struct run_context *ctx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
@ -18,7 +18,7 @@ inline uint64_t vcpu_get_gpreg(const struct vcpu *vcpu, uint32_t reg)
return ctx->guest_cpu_regs.longs[reg];
}
inline void vcpu_set_gpreg(struct vcpu *vcpu, uint32_t reg, uint64_t val)
inline void vcpu_set_gpreg(struct acrn_vcpu *vcpu, uint32_t reg, uint64_t val)
{
struct run_context *ctx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
@ -26,7 +26,7 @@ inline void vcpu_set_gpreg(struct vcpu *vcpu, uint32_t reg, uint64_t val)
ctx->guest_cpu_regs.longs[reg] = val;
}
inline uint64_t vcpu_get_rip(struct vcpu *vcpu)
inline uint64_t vcpu_get_rip(struct acrn_vcpu *vcpu)
{
struct run_context *ctx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
@ -37,13 +37,13 @@ inline uint64_t vcpu_get_rip(struct vcpu *vcpu)
return ctx->rip;
}
inline void vcpu_set_rip(struct vcpu *vcpu, uint64_t val)
inline void vcpu_set_rip(struct acrn_vcpu *vcpu, uint64_t val)
{
vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.rip = val;
bitmap_set_lock(CPU_REG_RIP, &vcpu->reg_updated);
}
inline uint64_t vcpu_get_rsp(struct vcpu *vcpu)
inline uint64_t vcpu_get_rsp(struct acrn_vcpu *vcpu)
{
struct run_context *ctx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
@ -51,7 +51,7 @@ inline uint64_t vcpu_get_rsp(struct vcpu *vcpu)
return ctx->guest_cpu_regs.regs.rsp;
}
inline void vcpu_set_rsp(struct vcpu *vcpu, uint64_t val)
inline void vcpu_set_rsp(struct acrn_vcpu *vcpu, uint64_t val)
{
struct run_context *ctx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
@ -60,7 +60,7 @@ inline void vcpu_set_rsp(struct vcpu *vcpu, uint64_t val)
bitmap_set_lock(CPU_REG_RSP, &vcpu->reg_updated);
}
inline uint64_t vcpu_get_efer(struct vcpu *vcpu)
inline uint64_t vcpu_get_efer(struct acrn_vcpu *vcpu)
{
struct run_context *ctx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
@ -71,14 +71,14 @@ inline uint64_t vcpu_get_efer(struct vcpu *vcpu)
return ctx->ia32_efer;
}
inline void vcpu_set_efer(struct vcpu *vcpu, uint64_t val)
inline void vcpu_set_efer(struct acrn_vcpu *vcpu, uint64_t val)
{
vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.ia32_efer
= val;
bitmap_set_lock(CPU_REG_EFER, &vcpu->reg_updated);
}
inline uint64_t vcpu_get_rflags(struct vcpu *vcpu)
inline uint64_t vcpu_get_rflags(struct acrn_vcpu *vcpu)
{
struct run_context *ctx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
@ -90,14 +90,14 @@ inline uint64_t vcpu_get_rflags(struct vcpu *vcpu)
return ctx->rflags;
}
inline void vcpu_set_rflags(struct vcpu *vcpu, uint64_t val)
inline void vcpu_set_rflags(struct acrn_vcpu *vcpu, uint64_t val)
{
vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.rflags =
val;
bitmap_set_lock(CPU_REG_RFLAGS, &vcpu->reg_updated);
}
inline uint64_t vcpu_get_cr0(struct vcpu *vcpu)
inline uint64_t vcpu_get_cr0(struct acrn_vcpu *vcpu)
{
uint64_t mask;
struct run_context *ctx =
@ -111,23 +111,23 @@ inline uint64_t vcpu_get_cr0(struct vcpu *vcpu)
return ctx->cr0;
}
inline void vcpu_set_cr0(struct vcpu *vcpu, uint64_t val)
inline void vcpu_set_cr0(struct acrn_vcpu *vcpu, uint64_t val)
{
vmx_write_cr0(vcpu, val);
}
inline uint64_t vcpu_get_cr2(struct vcpu *vcpu)
inline uint64_t vcpu_get_cr2(struct acrn_vcpu *vcpu)
{
return vcpu->
arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.cr2;
}
inline void vcpu_set_cr2(struct vcpu *vcpu, uint64_t val)
inline void vcpu_set_cr2(struct acrn_vcpu *vcpu, uint64_t val)
{
vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.cr2 = val;
}
inline uint64_t vcpu_get_cr4(struct vcpu *vcpu)
inline uint64_t vcpu_get_cr4(struct acrn_vcpu *vcpu)
{
uint64_t mask;
struct run_context *ctx =
@ -141,29 +141,29 @@ inline uint64_t vcpu_get_cr4(struct vcpu *vcpu)
return ctx->cr4;
}
inline void vcpu_set_cr4(struct vcpu *vcpu, uint64_t val)
inline void vcpu_set_cr4(struct acrn_vcpu *vcpu, uint64_t val)
{
vmx_write_cr4(vcpu, val);
}
inline uint64_t vcpu_get_pat_ext(const struct vcpu *vcpu)
inline uint64_t vcpu_get_pat_ext(const struct acrn_vcpu *vcpu)
{
return vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].
ext_ctx.ia32_pat;
}
inline void vcpu_set_pat_ext(struct vcpu *vcpu, uint64_t val)
inline void vcpu_set_pat_ext(struct acrn_vcpu *vcpu, uint64_t val)
{
vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ext_ctx.ia32_pat
= val;
}
struct vcpu *get_ever_run_vcpu(uint16_t pcpu_id)
struct acrn_vcpu *get_ever_run_vcpu(uint16_t pcpu_id)
{
return per_cpu(ever_run_vcpu, pcpu_id);
}
static void set_vcpu_mode(struct vcpu *vcpu, uint32_t cs_attr, uint64_t ia32_efer,
static void set_vcpu_mode(struct acrn_vcpu *vcpu, uint32_t cs_attr, uint64_t ia32_efer,
uint64_t cr0)
{
if (ia32_efer & MSR_IA32_EFER_LMA_BIT) {
@ -178,7 +178,7 @@ static void set_vcpu_mode(struct vcpu *vcpu, uint32_t cs_attr, uint64_t ia32_efe
}
}
void set_vcpu_regs(struct vcpu *vcpu, struct acrn_vcpu_regs *vcpu_regs)
void set_vcpu_regs(struct acrn_vcpu *vcpu, struct acrn_vcpu_regs *vcpu_regs)
{
struct ext_context *ectx;
struct run_context *ctx;
@ -280,12 +280,12 @@ static struct acrn_vcpu_regs realmode_init_regs = {
.cr4 = 0UL,
};
void reset_vcpu_regs(struct vcpu *vcpu)
void reset_vcpu_regs(struct acrn_vcpu *vcpu)
{
set_vcpu_regs(vcpu, &realmode_init_regs);
}
void set_ap_entry(struct vcpu *vcpu, uint64_t entry)
void set_ap_entry(struct acrn_vcpu *vcpu, uint64_t entry)
{
struct ext_context *ectx;
@ -311,9 +311,9 @@ void set_ap_entry(struct vcpu *vcpu, uint64_t entry)
* for physical CPU 1 : vcpu->pcpu_id = 1, vcpu->vcpu_id = 1, vmid = 1;
*
***********************************************************************/
int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct acrn_vcpu **rtn_vcpu_handle)
{
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
uint16_t vcpu_id;
pr_info("Creating VCPU working on PCPU%hu", pcpu_id);
@ -329,7 +329,7 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
}
/* Allocate memory for VCPU */
vcpu = &(vm->hw.vcpu_array[vcpu_id]);
(void)memset((void *)vcpu, 0U, sizeof(struct vcpu));
(void)memset((void *)vcpu, 0U, sizeof(struct acrn_vcpu));
/* Initialize CPU ID for this VCPU */
vcpu->vcpu_id = vcpu_id;
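Note: the memset above is one of the few hunks where the renamed type
appears inside an expression (sizeof) rather than a declaration, so the
treewide rename must cover expression contexts too. A rename-proof
alternative, noted only as a design observation (not what this commit does),
derives the size from the pointer itself:

(void)memset((void *)vcpu, 0U, sizeof(*vcpu));	/* tracks the pointee type */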
@ -387,7 +387,7 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
/*
* @pre vcpu != NULL
*/
int run_vcpu(struct vcpu *vcpu)
int run_vcpu(struct acrn_vcpu *vcpu)
{
uint32_t instlen, cs_attr;
uint64_t rip, ia32_efer, cr0;
@ -487,7 +487,7 @@ int run_vcpu(struct vcpu *vcpu)
return status;
}
int shutdown_vcpu(__unused struct vcpu *vcpu)
int shutdown_vcpu(__unused struct acrn_vcpu *vcpu)
{
/* TODO : Implement VCPU shutdown sequence */
@ -497,7 +497,7 @@ int shutdown_vcpu(__unused struct vcpu *vcpu)
/*
* @pre vcpu != NULL
*/
void offline_vcpu(struct vcpu *vcpu)
void offline_vcpu(struct acrn_vcpu *vcpu)
{
vlapic_free(vcpu);
per_cpu(ever_run_vcpu, vcpu->pcpu_id) = NULL;
@ -508,7 +508,7 @@ void offline_vcpu(struct vcpu *vcpu)
/* NOTE:
* vcpu should be paused before call this function.
*/
void reset_vcpu(struct vcpu *vcpu)
void reset_vcpu(struct acrn_vcpu *vcpu)
{
int i;
struct acrn_vlapic *vlapic;
@ -546,7 +546,7 @@ void reset_vcpu(struct vcpu *vcpu)
reset_vcpu_regs(vcpu);
}
void pause_vcpu(struct vcpu *vcpu, enum vcpu_state new_state)
void pause_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state)
{
uint16_t pcpu_id = get_cpu_id();
@ -572,7 +572,7 @@ void pause_vcpu(struct vcpu *vcpu, enum vcpu_state new_state)
}
}
void resume_vcpu(struct vcpu *vcpu)
void resume_vcpu(struct acrn_vcpu *vcpu)
{
pr_dbg("vcpu%hu resumed", vcpu->vcpu_id);
@ -586,7 +586,7 @@ void resume_vcpu(struct vcpu *vcpu)
release_schedule_lock(vcpu->pcpu_id);
}
void schedule_vcpu(struct vcpu *vcpu)
void schedule_vcpu(struct acrn_vcpu *vcpu)
{
vcpu->state = VCPU_RUNNING;
pr_dbg("vcpu%hu scheduled", vcpu->vcpu_id);
@ -601,7 +601,7 @@ void schedule_vcpu(struct vcpu *vcpu)
int prepare_vcpu(struct vm *vm, uint16_t pcpu_id)
{
int ret = 0;
struct vcpu *vcpu = NULL;
struct acrn_vcpu *vcpu = NULL;
ret = create_vcpu(pcpu_id, vm, &vcpu);
if (ret != 0) {
@ -619,7 +619,7 @@ int prepare_vcpu(struct vm *vm, uint16_t pcpu_id)
return ret;
}
void request_vcpu_pre_work(struct vcpu *vcpu, uint16_t pre_work_id)
void request_vcpu_pre_work(struct acrn_vcpu *vcpu, uint16_t pre_work_id)
{
bitmap_set_lock(pre_work_id, &vcpu->pending_pre_work);
}
@ -633,7 +633,7 @@ void vcpu_dumpreg(void *data)
uint64_t i, fault_addr, tmp[DUMPREG_SP_SIZE];
uint32_t err_code = 0;
struct vcpu_dump *dump = data;
struct vcpu *vcpu = dump->vcpu;
struct acrn_vcpu *vcpu = dump->vcpu;
char *str = dump->str;
size_t len, size = dump->str_max;


@ -109,7 +109,7 @@ static inline bool is_x2apic_enabled(const struct acrn_vlapic *vlapic);
static struct acrn_vlapic *
vm_lapic_from_vcpu_id(struct vm *vm, uint16_t vcpu_id)
{
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
vcpu = vcpu_from_vid(vm, vcpu_id);
@ -119,7 +119,7 @@ vm_lapic_from_vcpu_id(struct vm *vm, uint16_t vcpu_id)
static uint16_t vm_apicid2vcpu_id(struct vm *vm, uint8_t lapicid)
{
uint16_t i;
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
foreach_vcpu(i, vm, vcpu) {
struct acrn_vlapic *vlapic = vcpu_vlapic(vcpu);
@ -138,7 +138,7 @@ vm_active_cpus(const struct vm *vm)
{
uint64_t dmask = 0UL;
uint16_t i;
const struct vcpu *vcpu;
const struct acrn_vcpu *vcpu;
foreach_vcpu(i, vm, vcpu) {
bitmap_set_lock(vcpu->vcpu_id, &dmask);
@ -163,7 +163,7 @@ vlapic_get_apicid(struct acrn_vlapic *vlapic)
static inline uint32_t
vlapic_build_id(const struct acrn_vlapic *vlapic)
{
const struct vcpu *vcpu = vlapic->vcpu;
const struct acrn_vcpu *vcpu = vlapic->vcpu;
uint8_t vlapic_id;
uint32_t lapic_regs_id;
@ -560,7 +560,7 @@ void vlapic_post_intr(uint16_t dest_pcpu_id)
*
* @pre vcpu != NULL
*/
uint64_t apicv_get_pir_desc_paddr(struct vcpu *vcpu)
uint64_t apicv_get_pir_desc_paddr(struct acrn_vcpu *vcpu)
{
struct acrn_vlapic *vlapic;
@ -757,7 +757,7 @@ static void
vlapic_fire_lvt(struct acrn_vlapic *vlapic, uint32_t lvt)
{
uint32_t vec, mode;
struct vcpu *vcpu = vlapic->vcpu;
struct acrn_vcpu *vcpu = vlapic->vcpu;
if ((lvt & APIC_LVT_M) != 0U) {
return;
@ -938,7 +938,7 @@ static int
vlapic_trigger_lvt(struct acrn_vlapic *vlapic, uint32_t vector)
{
uint32_t lvt;
struct vcpu *vcpu = vlapic->vcpu;
struct acrn_vcpu *vcpu = vlapic->vcpu;
if (vlapic_enabled(vlapic) == false) {
/*
@ -1150,7 +1150,7 @@ vlapic_set_cr8(struct acrn_vlapic *vlapic, uint64_t val)
uint32_t tpr;
if ((val & ~0xfUL) != 0U) {
struct vcpu *vcpu = vlapic->vcpu;
struct acrn_vcpu *vcpu = vlapic->vcpu;
vcpu_inject_gp(vcpu, 0U);
return;
}
@ -1170,7 +1170,7 @@ vlapic_get_cr8(const struct acrn_vlapic *vlapic)
}
static void
vlapic_process_init_sipi(struct vcpu* target_vcpu, uint32_t mode,
vlapic_process_init_sipi(struct acrn_vcpu* target_vcpu, uint32_t mode,
uint32_t icr_low, uint16_t vcpu_id)
{
if (mode == APIC_DELMODE_INIT) {
@ -1224,7 +1224,7 @@ vlapic_icrlo_write_handler(struct acrn_vlapic *vlapic)
uint32_t icr_low, icr_high, dest;
uint32_t vec, mode, shorthand;
struct lapic_regs *lapic;
struct vcpu *target_vcpu;
struct acrn_vcpu *target_vcpu;
lapic = &(vlapic->apic_page);
lapic->icr_lo.v &= ~APIC_DELSTAT_PEND;
@ -1803,7 +1803,7 @@ vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest, bool phys,
bool lowprio;
uint16_t vcpu_id;
uint64_t dmask;
struct vcpu *target_vcpu;
struct acrn_vcpu *target_vcpu;
if ((delmode != IOAPIC_RTE_DELFIXED) &&
(delmode != IOAPIC_RTE_DELLOPRI) &&
@ -1885,7 +1885,7 @@ vlapic_apicv_batch_set_tmr(struct acrn_vlapic *vlapic)
void
vlapic_reset_tmr(struct acrn_vlapic *vlapic)
{
struct vcpu *vcpu = vlapic->vcpu;
struct acrn_vcpu *vcpu = vlapic->vcpu;
uint32_t vector;
dev_dbg(ACRN_DBG_LAPIC,
@ -1927,7 +1927,7 @@ vlapic_set_tmr_one_vec(struct acrn_vlapic *vlapic, uint32_t delmode,
}
int
vlapic_set_intr(struct vcpu *vcpu, uint32_t vector, bool level)
vlapic_set_intr(struct acrn_vcpu *vcpu, uint32_t vector, bool level)
{
struct acrn_vlapic *vlapic;
@ -2052,7 +2052,7 @@ vlapic_intr_msi(struct vm *vm, uint64_t addr, uint64_t msg)
/* interrupt context */
static void vlapic_timer_expired(void *data)
{
struct vcpu *vcpu = (struct vcpu *)data;
struct acrn_vcpu *vcpu = (struct acrn_vcpu *)data;
struct acrn_vlapic *vlapic;
struct lapic_regs *lapic;
@ -2101,7 +2101,7 @@ vlapic_x2apic_pt_icr_access(struct vm *vm, uint64_t val)
uint32_t icr_low = val;
uint32_t mode = icr_low & APIC_DELMODE_MASK;
uint16_t vcpu_id;
struct vcpu *target_vcpu;
struct acrn_vcpu *target_vcpu;
bool phys;
uint32_t shorthand;
@ -2135,7 +2135,7 @@ vlapic_x2apic_pt_icr_access(struct vm *vm, uint64_t val)
}
#endif
static int vlapic_x2apic_access(struct vcpu *vcpu, uint32_t msr, bool write,
static int vlapic_x2apic_access(struct acrn_vcpu *vcpu, uint32_t msr, bool write,
uint64_t *val)
{
struct acrn_vlapic *vlapic;
@ -2172,7 +2172,7 @@ static int vlapic_x2apic_access(struct vcpu *vcpu, uint32_t msr, bool write,
}
int
vlapic_rdmsr(struct vcpu *vcpu, uint32_t msr, uint64_t *rval)
vlapic_rdmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t *rval)
{
int error = 0;
struct acrn_vlapic *vlapic;
@ -2204,7 +2204,7 @@ vlapic_rdmsr(struct vcpu *vcpu, uint32_t msr, uint64_t *rval)
}
int
vlapic_wrmsr(struct vcpu *vcpu, uint32_t msr, uint64_t wval)
vlapic_wrmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t wval)
{
int error = 0;
struct acrn_vlapic *vlapic;
@ -2236,7 +2236,7 @@ vlapic_wrmsr(struct vcpu *vcpu, uint32_t msr, uint64_t wval)
return error;
}
int vlapic_create(struct vcpu *vcpu)
int vlapic_create(struct acrn_vcpu *vcpu)
{
vcpu->arch_vcpu.vlapic.vm = vcpu->vm;
vcpu->arch_vcpu.vlapic.vcpu = vcpu;
@ -2263,7 +2263,7 @@ int vlapic_create(struct vcpu *vcpu)
/*
* @pre vcpu != NULL
*/
void vlapic_free(struct vcpu *vcpu)
void vlapic_free(struct acrn_vcpu *vcpu)
{
struct acrn_vlapic *vlapic = NULL;
@ -2442,7 +2442,7 @@ vlapic_apicv_inject_pir(struct acrn_vlapic *vlapic)
}
}
int apic_access_vmexit_handler(struct vcpu *vcpu)
int apic_access_vmexit_handler(struct acrn_vcpu *vcpu)
{
int err = 0;
uint32_t offset = 0U;
@ -2487,7 +2487,7 @@ int apic_access_vmexit_handler(struct vcpu *vcpu)
return err;
}
int veoi_vmexit_handler(struct vcpu *vcpu)
int veoi_vmexit_handler(struct acrn_vcpu *vcpu)
{
struct acrn_vlapic *vlapic = NULL;
@ -2520,7 +2520,7 @@ static void vlapic_x2apic_self_ipi_handler(struct acrn_vlapic *vlapic)
{
struct lapic_regs *lapic;
uint32_t vector;
struct vcpu *target_vcpu;
struct acrn_vcpu *target_vcpu;
lapic = &(vlapic->apic_page);
vector = lapic->self_ipi.v & 0xFFU;
@ -2528,7 +2528,7 @@ static void vlapic_x2apic_self_ipi_handler(struct acrn_vlapic *vlapic)
vlapic_set_intr(target_vcpu, vector, LAPIC_TRIG_EDGE);
}
int apic_write_vmexit_handler(struct vcpu *vcpu)
int apic_write_vmexit_handler(struct acrn_vcpu *vcpu)
{
uint64_t qual;
int error, handled;
@ -2598,7 +2598,7 @@ int apic_write_vmexit_handler(struct vcpu *vcpu)
return handled;
}
int tpr_below_threshold_vmexit_handler(__unused struct vcpu *vcpu)
int tpr_below_threshold_vmexit_handler(__unused struct acrn_vcpu *vcpu)
{
pr_err("Unhandled %s.", __func__);
return 0;


@ -189,7 +189,7 @@ int shutdown_vm(struct vm *vm)
{
int status = 0;
uint16_t i;
struct vcpu *vcpu = NULL;
struct acrn_vcpu *vcpu = NULL;
pause_vm(vm);
@ -235,7 +235,7 @@ int shutdown_vm(struct vm *vm)
*/
int start_vm(struct vm *vm)
{
struct vcpu *vcpu = NULL;
struct acrn_vcpu *vcpu = NULL;
vm->state = VM_STARTED;
@ -252,7 +252,7 @@ int start_vm(struct vm *vm)
int reset_vm(struct vm *vm)
{
int i;
struct vcpu *vcpu = NULL;
struct acrn_vcpu *vcpu = NULL;
if (vm->state != VM_PAUSED) {
return -1;
@ -279,7 +279,7 @@ int reset_vm(struct vm *vm)
void pause_vm(struct vm *vm)
{
uint16_t i;
struct vcpu *vcpu = NULL;
struct acrn_vcpu *vcpu = NULL;
if (vm->state == VM_PAUSED) {
return;
@ -298,7 +298,7 @@ void pause_vm(struct vm *vm)
void resume_vm(struct vm *vm)
{
uint16_t i;
struct vcpu *vcpu = NULL;
struct acrn_vcpu *vcpu = NULL;
foreach_vcpu(i, vm, vcpu) {
resume_vcpu(vcpu);
@ -323,7 +323,7 @@ void resume_vm(struct vm *vm)
*/
void resume_vm_from_s3(struct vm *vm, uint32_t wakeup_vec)
{
struct vcpu *bsp = vcpu_from_vid(vm, 0U);
struct acrn_vcpu *bsp = vcpu_from_vid(vm, 0U);
vm->state = VM_STARTED;


@ -12,7 +12,7 @@
* This function should always return 0 since we shouldn't
* deal with hypercall error in hypervisor.
*/
int vmcall_vmexit_handler(struct vcpu *vcpu)
int vmcall_vmexit_handler(struct acrn_vcpu *vcpu)
{
int32_t ret = -EACCES;
struct vm *vm = vcpu->vm;


@ -132,7 +132,7 @@ static void intercept_x2apic_msrs(uint8_t *msr_bitmap_arg, enum rw_mode mode)
}
}
void init_msr_emulation(struct vcpu *vcpu)
void init_msr_emulation(struct acrn_vcpu *vcpu)
{
uint32_t i;
uint32_t msrs_count = ARRAY_SIZE(emulated_msrs);
@ -184,7 +184,7 @@ void init_msr_emulation(struct vcpu *vcpu)
pr_dbg("VMX_MSR_BITMAP: 0x%016llx ", value64);
}
int rdmsr_vmexit_handler(struct vcpu *vcpu)
int rdmsr_vmexit_handler(struct acrn_vcpu *vcpu)
{
int err = 0;
uint32_t msr;
@ -297,7 +297,7 @@ int rdmsr_vmexit_handler(struct vcpu *vcpu)
return err;
}
int wrmsr_vmexit_handler(struct vcpu *vcpu)
int wrmsr_vmexit_handler(struct acrn_vcpu *vcpu)
{
int err = 0;
uint32_t msr;
@ -428,7 +428,7 @@ int wrmsr_vmexit_handler(struct vcpu *vcpu)
return err;
}
void update_msr_bitmap_x2apic_apicv(struct vcpu *vcpu)
void update_msr_bitmap_x2apic_apicv(struct acrn_vcpu *vcpu)
{
uint8_t *msr_bitmap;
@ -447,7 +447,7 @@ void update_msr_bitmap_x2apic_apicv(struct vcpu *vcpu)
enable_msr_interception(msr_bitmap, MSR_IA32_EXT_APIC_SELF_IPI, READ);
}
void update_msr_bitmap_x2apic_passthru(struct vcpu *vcpu)
void update_msr_bitmap_x2apic_passthru(struct acrn_vcpu *vcpu)
{
uint32_t msr;
uint8_t *msr_bitmap;


@ -24,7 +24,7 @@ static void complete_ioreq(struct vhm_request *vhm_req)
* request having transferred to the COMPLETE state.
*/
static void
emulate_pio_post(struct vcpu *vcpu, const struct io_request *io_req)
emulate_pio_post(struct acrn_vcpu *vcpu, const struct io_request *io_req)
{
const struct pio_request *pio_req = &io_req->reqs.pio;
uint64_t mask = 0xFFFFFFFFUL >> (32UL - 8UL * pio_req->size);
@ -46,7 +46,7 @@ emulate_pio_post(struct vcpu *vcpu, const struct io_request *io_req)
* @remark This function must be called after the VHM request corresponding to
* \p vcpu being transferred to the COMPLETE state.
*/
void dm_emulate_pio_post(struct vcpu *vcpu)
void dm_emulate_pio_post(struct acrn_vcpu *vcpu)
{
uint16_t cur = vcpu->vcpu_id;
union vhm_request_buffer *req_buf = NULL;
@ -77,7 +77,7 @@ void dm_emulate_pio_post(struct vcpu *vcpu)
* either a previous call to emulate_io() returning 0 or the corresponding VHM
* request transferring to the COMPLETE state.
*/
void emulate_mmio_post(const struct vcpu *vcpu, const struct io_request *io_req)
void emulate_mmio_post(const struct acrn_vcpu *vcpu, const struct io_request *io_req)
{
const struct mmio_request *mmio_req = &io_req->reqs.mmio;
@ -97,7 +97,7 @@ void emulate_mmio_post(const struct vcpu *vcpu, const struct io_request *io_req)
* @remark This function must be called after the VHM request corresponding to
* \p vcpu being transferred to the COMPLETE state.
*/
void dm_emulate_mmio_post(struct vcpu *vcpu)
void dm_emulate_mmio_post(struct acrn_vcpu *vcpu)
{
uint16_t cur = vcpu->vcpu_id;
struct io_request *io_req = &vcpu->req;
@ -132,7 +132,7 @@ static void io_instr_dest_handler(struct io_request *io_req)
*
* @param vcpu The virtual CPU that triggers the MMIO access
*/
void emulate_io_post(struct vcpu *vcpu)
void emulate_io_post(struct acrn_vcpu *vcpu)
{
union vhm_request_buffer *req_buf;
struct vhm_request *vhm_req;
@ -191,7 +191,7 @@ void emulate_io_post(struct vcpu *vcpu)
* @return -EIO - The request spans multiple devices and cannot be emulated.
*/
int32_t
hv_emulate_pio(const struct vcpu *vcpu, struct io_request *io_req)
hv_emulate_pio(const struct acrn_vcpu *vcpu, struct io_request *io_req)
{
int32_t status = -ENODEV;
uint16_t port, size;
@ -249,7 +249,7 @@ hv_emulate_pio(const struct vcpu *vcpu, struct io_request *io_req)
* @return -EIO - The request spans multiple devices and cannot be emulated.
*/
static int32_t
hv_emulate_mmio(struct vcpu *vcpu, struct io_request *io_req)
hv_emulate_mmio(struct acrn_vcpu *vcpu, struct io_request *io_req)
{
int status = -ENODEV;
uint64_t address, size;
@ -299,7 +299,7 @@ hv_emulate_mmio(struct vcpu *vcpu, struct io_request *io_req)
* @return Negative on other errors during emulation.
*/
int32_t
emulate_io(struct vcpu *vcpu, struct io_request *io_req)
emulate_io(struct acrn_vcpu *vcpu, struct io_request *io_req)
{
int32_t status;
@ -357,7 +357,7 @@ emulate_io(struct vcpu *vcpu, struct io_request *io_req)
*
* @param vcpu The virtual CPU which triggers the VM exit on I/O instruction
*/
int32_t pio_instr_vmexit_handler(struct vcpu *vcpu)
int32_t pio_instr_vmexit_handler(struct acrn_vcpu *vcpu)
{
int32_t status;
uint64_t exit_qual;


@ -384,7 +384,7 @@ void dispatch_exception(struct intr_excp_ctx *ctx)
void partition_mode_dispatch_interrupt(struct intr_excp_ctx *ctx)
{
uint8_t vr = ctx->vector;
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
/*
* There is no vector and APIC ID remapping for VMs in


@ -168,7 +168,7 @@ void flush_vpid_global(void)
local_invvpid(VMX_VPID_TYPE_ALL_CONTEXT, 0U, 0UL);
}
void invept(const struct vcpu *vcpu)
void invept(const struct acrn_vcpu *vcpu)
{
struct invept_desc desc = {0};


@ -63,23 +63,23 @@ get_subrange_start_of_fixed_mtrr(uint32_t index, uint32_t subrange_id)
get_subrange_size_of_fixed_mtrr(index));
}
static inline bool is_mtrr_enabled(const struct vcpu *vcpu)
static inline bool is_mtrr_enabled(const struct acrn_vcpu *vcpu)
{
return (vcpu->mtrr.def_type.bits.enable != 0U);
}
static inline bool is_fixed_range_mtrr_enabled(const struct vcpu *vcpu)
static inline bool is_fixed_range_mtrr_enabled(const struct acrn_vcpu *vcpu)
{
return ((vcpu->mtrr.cap.bits.fix != 0U) &&
(vcpu->mtrr.def_type.bits.fixed_enable != 0U));
}
static inline uint8_t get_default_memory_type(const struct vcpu *vcpu)
static inline uint8_t get_default_memory_type(const struct acrn_vcpu *vcpu)
{
return (uint8_t)(vcpu->mtrr.def_type.bits.type);
}
void init_mtrr(struct vcpu *vcpu)
void init_mtrr(struct acrn_vcpu *vcpu)
{
union mtrr_cap_reg cap = {0};
uint32_t i;
@ -154,7 +154,7 @@ static uint32_t update_ept(struct vm *vm, uint64_t start,
return attr;
}
static void update_ept_mem_type(const struct vcpu *vcpu)
static void update_ept_mem_type(const struct acrn_vcpu *vcpu)
{
uint8_t type;
uint64_t start, size;
@ -193,7 +193,7 @@ static void update_ept_mem_type(const struct vcpu *vcpu)
}
}
void mtrr_wrmsr(struct vcpu *vcpu, uint32_t msr, uint64_t value)
void mtrr_wrmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t value)
{
uint32_t index;
@ -236,7 +236,7 @@ void mtrr_wrmsr(struct vcpu *vcpu, uint32_t msr, uint64_t value)
}
}
uint64_t mtrr_rdmsr(const struct vcpu *vcpu, uint32_t msr)
uint64_t mtrr_rdmsr(const struct acrn_vcpu *vcpu, uint32_t msr)
{
const struct mtrr_state *mtrr = &vcpu->mtrr;
uint64_t ret = 0UL;


@ -181,7 +181,7 @@ void destroy_secure_world(struct vm *vm, bool need_clr_mem)
}
static void save_world_ctx(struct vcpu *vcpu, struct ext_context *ext_ctx)
static void save_world_ctx(struct acrn_vcpu *vcpu, struct ext_context *ext_ctx)
{
/* cache on-demand run_context for efer/rflags/rsp/rip */
(void)vcpu_get_efer(vcpu);
@ -235,7 +235,7 @@ static void save_world_ctx(struct vcpu *vcpu, struct ext_context *ext_ctx)
: : "r" (ext_ctx->fxstore_guest_area) : "memory");
}
static void load_world_ctx(struct vcpu *vcpu, const struct ext_context *ext_ctx)
static void load_world_ctx(struct acrn_vcpu *vcpu, const struct ext_context *ext_ctx)
{
/* mark to update on-demand run_context for efer/rflags/rsp */
bitmap_set_lock(CPU_REG_EFER, &vcpu->reg_updated);
@ -291,7 +291,7 @@ static void copy_smc_param(const struct run_context *prev_ctx,
next_ctx->guest_cpu_regs.regs.rbx = prev_ctx->guest_cpu_regs.regs.rbx;
}
void switch_world(struct vcpu *vcpu, int next_world)
void switch_world(struct acrn_vcpu *vcpu, int next_world)
{
struct vcpu_arch *arch_vcpu = &vcpu->arch_vcpu;
@ -327,7 +327,7 @@ void switch_world(struct vcpu *vcpu, int next_world)
/* Put key_info and trusty_startup_param in the first Page of Trusty
* runtime memory
*/
static bool setup_trusty_info(struct vcpu *vcpu,
static bool setup_trusty_info(struct acrn_vcpu *vcpu,
uint32_t mem_size, uint64_t mem_base_hpa)
{
uint32_t i;
@ -381,7 +381,7 @@ static bool setup_trusty_info(struct vcpu *vcpu,
* RIP, RSP and RDI are specified below, other GP registers are leaved
* as 0.
*/
static bool init_secure_world_env(struct vcpu *vcpu,
static bool init_secure_world_env(struct acrn_vcpu *vcpu,
uint64_t entry_gpa,
uint64_t base_hpa,
uint32_t size)
@ -398,7 +398,7 @@ static bool init_secure_world_env(struct vcpu *vcpu,
return setup_trusty_info(vcpu, size, base_hpa);
}
bool initialize_trusty(struct vcpu *vcpu, uint64_t param)
bool initialize_trusty(struct acrn_vcpu *vcpu, uint64_t param)
{
uint64_t trusty_entry_gpa, trusty_base_gpa, trusty_base_hpa;
uint32_t trusty_mem_size;
@ -477,7 +477,7 @@ void trusty_set_dseed(const void *dseed, uint8_t dseed_num)
dseed, sizeof(struct seed_info) * dseed_num);
}
void save_sworld_context(struct vcpu *vcpu)
void save_sworld_context(struct acrn_vcpu *vcpu)
{
(void)memcpy_s(&vcpu->vm->sworld_snapshot,
sizeof(struct cpu_context),
@ -485,7 +485,7 @@ void save_sworld_context(struct vcpu *vcpu)
sizeof(struct cpu_context));
}
void restore_sworld_context(struct vcpu *vcpu)
void restore_sworld_context(struct acrn_vcpu *vcpu)
{
struct secure_world_control *sworld_ctl =
&vcpu->vm->sworld_control;


@ -49,7 +49,7 @@ static const uint16_t exception_type[32] = {
[31] = VMX_INT_TYPE_HW_EXP
};
static bool is_guest_irq_enabled(struct vcpu *vcpu)
static bool is_guest_irq_enabled(struct acrn_vcpu *vcpu)
{
uint64_t guest_rflags, guest_state;
bool status = false;
@ -70,7 +70,7 @@ static bool is_guest_irq_enabled(struct vcpu *vcpu)
return status;
}
static bool vcpu_pending_request(struct vcpu *vcpu)
static bool vcpu_pending_request(struct acrn_vcpu *vcpu)
{
struct acrn_vlapic *vlapic;
uint32_t vector = 0U;
@ -91,7 +91,7 @@ static bool vcpu_pending_request(struct vcpu *vcpu)
return vcpu->arch_vcpu.pending_req != 0UL;
}
void vcpu_make_request(struct vcpu *vcpu, uint16_t eventid)
void vcpu_make_request(struct acrn_vcpu *vcpu, uint16_t eventid)
{
bitmap_set_lock(eventid, &vcpu->arch_vcpu.pending_req);
/*
@ -108,7 +108,7 @@ void vcpu_make_request(struct vcpu *vcpu, uint16_t eventid)
}
}
static int vcpu_inject_vlapic_int(struct vcpu *vcpu)
static int vcpu_inject_vlapic_int(struct acrn_vcpu *vcpu)
{
struct acrn_vlapic *vlapic = vcpu_vlapic(vcpu);
uint32_t vector = 0U;
@ -148,10 +148,10 @@ static int vcpu_inject_vlapic_int(struct vcpu *vcpu)
return 0;
}
static int vcpu_do_pending_extint(const struct vcpu *vcpu)
static int vcpu_do_pending_extint(const struct acrn_vcpu *vcpu)
{
struct vm *vm;
struct vcpu *primary;
struct acrn_vcpu *primary;
uint32_t vector;
vm = vcpu->vm;
@ -205,7 +205,7 @@ static int get_excep_class(uint32_t vector)
}
}
int vcpu_queue_exception(struct vcpu *vcpu, uint32_t vector, uint32_t err_code)
int vcpu_queue_exception(struct acrn_vcpu *vcpu, uint32_t vector, uint32_t err_code)
{
struct vcpu_arch *arch_vcpu = &vcpu->arch_vcpu;
/* VECTOR_INVALID is also greater than 32 */
@ -250,7 +250,7 @@ int vcpu_queue_exception(struct vcpu *vcpu, uint32_t vector, uint32_t err_code)
return 0;
}
static void vcpu_inject_exception(struct vcpu *vcpu, uint32_t vector)
static void vcpu_inject_exception(struct acrn_vcpu *vcpu, uint32_t vector)
{
if ((exception_type[vector] & EXCEPTION_ERROR_CODE_VALID) != 0U) {
exec_vmwrite32(VMX_ENTRY_EXCEPTION_ERROR_CODE,
@ -266,7 +266,7 @@ static void vcpu_inject_exception(struct vcpu *vcpu, uint32_t vector)
vcpu_retain_rip(vcpu);
}
static int vcpu_inject_hi_exception(struct vcpu *vcpu)
static int vcpu_inject_hi_exception(struct acrn_vcpu *vcpu)
{
uint32_t vector = vcpu->arch_vcpu.exception_info.exception;
@ -278,7 +278,7 @@ static int vcpu_inject_hi_exception(struct vcpu *vcpu)
return 0;
}
static int vcpu_inject_lo_exception(struct vcpu *vcpu)
static int vcpu_inject_lo_exception(struct acrn_vcpu *vcpu)
{
uint32_t vector = vcpu->arch_vcpu.exception_info.exception;
@ -292,26 +292,26 @@ static int vcpu_inject_lo_exception(struct vcpu *vcpu)
}
/* Inject external interrupt to guest */
void vcpu_inject_extint(struct vcpu *vcpu)
void vcpu_inject_extint(struct acrn_vcpu *vcpu)
{
vcpu_make_request(vcpu, ACRN_REQUEST_EXTINT);
}
/* Inject NMI to guest */
void vcpu_inject_nmi(struct vcpu *vcpu)
void vcpu_inject_nmi(struct acrn_vcpu *vcpu)
{
vcpu_make_request(vcpu, ACRN_REQUEST_NMI);
}
/* Inject general protection exception(#GP) to guest */
void vcpu_inject_gp(struct vcpu *vcpu, uint32_t err_code)
void vcpu_inject_gp(struct acrn_vcpu *vcpu, uint32_t err_code)
{
(void)vcpu_queue_exception(vcpu, IDT_GP, err_code);
vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
}
/* Inject page fault exception(#PF) to guest */
void vcpu_inject_pf(struct vcpu *vcpu, uint64_t addr, uint32_t err_code)
void vcpu_inject_pf(struct acrn_vcpu *vcpu, uint64_t addr, uint32_t err_code)
{
vcpu_set_cr2(vcpu, addr);
(void)vcpu_queue_exception(vcpu, IDT_PF, err_code);
@ -319,27 +319,27 @@ void vcpu_inject_pf(struct vcpu *vcpu, uint64_t addr, uint32_t err_code)
}
/* Inject invalid opcode exception(#UD) to guest */
void vcpu_inject_ud(struct vcpu *vcpu)
void vcpu_inject_ud(struct acrn_vcpu *vcpu)
{
(void)vcpu_queue_exception(vcpu, IDT_UD, 0);
vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
}
/* Inject alignment check exception(#AC) to guest */
void vcpu_inject_ac(struct vcpu *vcpu)
void vcpu_inject_ac(struct acrn_vcpu *vcpu)
{
(void)vcpu_queue_exception(vcpu, IDT_AC, 0);
vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
}
/* Inject stack fault exception(#SS) to guest */
void vcpu_inject_ss(struct vcpu *vcpu)
void vcpu_inject_ss(struct acrn_vcpu *vcpu)
{
(void)vcpu_queue_exception(vcpu, IDT_SS, 0);
vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
}
int interrupt_window_vmexit_handler(struct vcpu *vcpu)
int interrupt_window_vmexit_handler(struct acrn_vcpu *vcpu)
{
uint32_t value32;
@ -357,7 +357,7 @@ int interrupt_window_vmexit_handler(struct vcpu *vcpu)
return 0;
}
int external_interrupt_vmexit_handler(struct vcpu *vcpu)
int external_interrupt_vmexit_handler(struct acrn_vcpu *vcpu)
{
uint32_t intr_info;
struct intr_excp_ctx ctx;
@ -389,7 +389,7 @@ int external_interrupt_vmexit_handler(struct vcpu *vcpu)
return 0;
}
int acrn_handle_pending_request(struct vcpu *vcpu)
int acrn_handle_pending_request(struct acrn_vcpu *vcpu)
{
int ret = 0;
uint32_t tmp;
@ -544,7 +544,7 @@ INTR_WIN:
return ret;
}
void cancel_event_injection(struct vcpu *vcpu)
void cancel_event_injection(struct acrn_vcpu *vcpu)
{
uint32_t intinfo;
@ -572,7 +572,7 @@ void cancel_event_injection(struct vcpu *vcpu)
/*
* @pre vcpu != NULL
*/
int exception_vmexit_handler(struct vcpu *vcpu)
int exception_vmexit_handler(struct acrn_vcpu *vcpu)
{
uint32_t intinfo, int_err_code = 0U;
uint32_t exception_vector = VECTOR_INVALID;


@ -12,8 +12,8 @@
*/
#define NR_VMX_EXIT_REASONS 65U
static int unhandled_vmexit_handler(struct vcpu *vcpu);
static int xsetbv_vmexit_handler(struct vcpu *vcpu);
static int unhandled_vmexit_handler(struct acrn_vcpu *vcpu);
static int xsetbv_vmexit_handler(struct acrn_vcpu *vcpu);
/* VM Dispatch table for Exit condition handling */
static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
@ -151,7 +151,7 @@ static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
.handler = unhandled_vmexit_handler}
};
int vmexit_handler(struct vcpu *vcpu)
int vmexit_handler(struct acrn_vcpu *vcpu)
{
struct vm_exit_dispatch *dispatch = NULL;
uint16_t basic_exit_reason;
@ -227,7 +227,7 @@ int vmexit_handler(struct vcpu *vcpu)
return ret;
}
static int unhandled_vmexit_handler(struct vcpu *vcpu)
static int unhandled_vmexit_handler(struct acrn_vcpu *vcpu)
{
pr_fatal("Error: Unhandled VM exit condition from guest at 0x%016llx ",
exec_vmread(VMX_GUEST_RIP));
@ -242,7 +242,7 @@ static int unhandled_vmexit_handler(struct vcpu *vcpu)
return 0;
}
int cpuid_vmexit_handler(struct vcpu *vcpu)
int cpuid_vmexit_handler(struct acrn_vcpu *vcpu)
{
uint64_t rax, rbx, rcx, rdx;
@ -262,7 +262,7 @@ int cpuid_vmexit_handler(struct vcpu *vcpu)
return 0;
}
int cr_access_vmexit_handler(struct vcpu *vcpu)
int cr_access_vmexit_handler(struct acrn_vcpu *vcpu)
{
uint64_t reg;
uint32_t idx;
@ -317,7 +317,7 @@ int cr_access_vmexit_handler(struct vcpu *vcpu)
* XSETBV instruction set's the XCR0 that is used to tell for which
* components states can be saved on a context switch using xsave.
*/
static int xsetbv_vmexit_handler(struct vcpu *vcpu)
static int xsetbv_vmexit_handler(struct acrn_vcpu *vcpu)
{
int idx;
uint64_t val64;


@ -26,8 +26,8 @@ static uint64_t cr4_host_mask;
static uint64_t cr4_always_on_mask;
static uint64_t cr4_always_off_mask;
void update_msr_bitmap_x2apic_apicv(struct vcpu *vcpu);
void update_msr_bitmap_x2apic_passthru(struct vcpu *vcpu);
void update_msr_bitmap_x2apic_apicv(struct acrn_vcpu *vcpu);
void update_msr_bitmap_x2apic_passthru(struct acrn_vcpu *vcpu);
bool is_vmx_disabled(void)
{
@ -86,7 +86,7 @@ void exec_vmxon_instr(uint16_t pcpu_id)
uint32_t tmp32;
void *vmxon_region_va = (void *)per_cpu(vmxon_region, pcpu_id);
uint64_t vmxon_region_pa;
struct vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
struct acrn_vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
/* Initialize vmxon page with revision id from IA32 VMX BASIC MSR */
tmp32 = (uint32_t)msr_read(MSR_IA32_VMX_BASIC);
@ -109,7 +109,7 @@ void exec_vmxon_instr(uint16_t pcpu_id)
void vmx_off(uint16_t pcpu_id)
{
struct vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
struct acrn_vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
uint64_t vmcs_pa;
vmcs_pa = hva2hpa(vcpu->arch_vcpu.vmcs);
@ -253,7 +253,7 @@ static void init_cr0_cr4_host_mask(void)
pr_dbg("CR4 mask value: 0x%016llx", cr4_host_mask);
}
uint64_t vmx_rdmsr_pat(const struct vcpu *vcpu)
uint64_t vmx_rdmsr_pat(const struct acrn_vcpu *vcpu)
{
/*
* note: if context->cr0.CD is set, the actual value in guest's
@ -263,7 +263,7 @@ uint64_t vmx_rdmsr_pat(const struct vcpu *vcpu)
return vcpu_get_pat_ext(vcpu);
}
int vmx_wrmsr_pat(struct vcpu *vcpu, uint64_t value)
int vmx_wrmsr_pat(struct acrn_vcpu *vcpu, uint64_t value)
{
uint32_t i;
uint64_t field;
@ -291,7 +291,7 @@ int vmx_wrmsr_pat(struct vcpu *vcpu, uint64_t value)
return 0;
}
static void load_pdptrs(const struct vcpu *vcpu)
static void load_pdptrs(const struct acrn_vcpu *vcpu)
{
uint64_t guest_cr3 = exec_vmread(VMX_GUEST_CR3);
/* TODO: check whether guest cr3 is valid */
@ -303,7 +303,7 @@ static void load_pdptrs(const struct vcpu *vcpu)
exec_vmwrite64(VMX_GUEST_PDPTE3_FULL, get_pgentry(guest_cr3_hva + 3UL));
}
static bool is_cr0_write_valid(struct vcpu *vcpu, uint64_t cr0)
static bool is_cr0_write_valid(struct acrn_vcpu *vcpu, uint64_t cr0)
{
/* Shouldn't set always off bit */
if ((cr0 & cr0_always_off_mask) != 0UL) {
@ -359,7 +359,7 @@ static bool is_cr0_write_valid(struct vcpu *vcpu, uint64_t cr0)
* - PG (31) Trapped to track cpu/paging mode.
* Set the value according to the value from guest.
*/
void vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t cr0)
{
uint64_t cr0_vmx;
uint32_t entry_ctrls;
@ -450,7 +450,7 @@ void vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
cr0, cr0_vmx);
}
static bool is_cr4_write_valid(struct vcpu *vcpu, uint64_t cr4)
static bool is_cr4_write_valid(struct acrn_vcpu *vcpu, uint64_t cr4)
{
/* Check if guest try to set fixed to 0 bits or reserved bits */
if ((cr4 & cr4_always_off_mask) != 0U) {
@ -511,7 +511,7 @@ static bool is_cr4_write_valid(struct vcpu *vcpu, uint64_t cr4)
* - SMAP (21) Flexible to guest
* - PKE (22) Flexible to guest
*/
void vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4)
void vmx_write_cr4(struct acrn_vcpu *vcpu, uint64_t cr4)
{
uint64_t cr4_vmx;
uint64_t old_cr4 = vcpu_get_cr4(vcpu);
@ -545,7 +545,7 @@ void vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4)
}
/* rip, rsp, ia32_efer and rflags are written to VMCS in start_vcpu */
static void init_guest_vmx(struct vcpu *vcpu, uint64_t cr0, uint64_t cr3,
static void init_guest_vmx(struct acrn_vcpu *vcpu, uint64_t cr0, uint64_t cr3,
uint64_t cr4)
{
struct cpu_context *ctx =
@ -590,7 +590,7 @@ static void init_guest_vmx(struct vcpu *vcpu, uint64_t cr0, uint64_t cr3,
exec_vmwrite(VMX_GUEST_DR7, DR7_INIT_VALUE);
}
static void init_guest_state(struct vcpu *vcpu)
static void init_guest_state(struct acrn_vcpu *vcpu)
{
struct cpu_context *ctx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
@ -758,7 +758,7 @@ static uint32_t check_vmx_ctrl(uint32_t msr, uint32_t ctrl_req)
}
static void init_exec_ctrl(struct vcpu *vcpu)
static void init_exec_ctrl(struct acrn_vcpu *vcpu)
{
uint32_t value32;
uint64_t value64;
@ -948,7 +948,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
exec_vmwrite(VMX_CR3_TARGET_3, 0UL);
}
static void init_entry_ctrl(__unused const struct vcpu *vcpu)
static void init_entry_ctrl(__unused const struct acrn_vcpu *vcpu)
{
uint32_t value32;
@ -1028,7 +1028,7 @@ static void init_exit_ctrl(void)
/**
* @pre vcpu != NULL
*/
void init_vmcs(struct vcpu *vcpu)
void init_vmcs(struct acrn_vcpu *vcpu)
{
uint64_t vmx_rev_id;
uint64_t vmcs_pa;
@ -1057,7 +1057,7 @@ void init_vmcs(struct vcpu *vcpu)
}
#ifndef CONFIG_PARTITION_MODE
void switch_apicv_mode_x2apic(struct vcpu *vcpu)
void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu)
{
uint32_t value32;
value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS2);
@ -1067,7 +1067,7 @@ void switch_apicv_mode_x2apic(struct vcpu *vcpu)
update_msr_bitmap_x2apic_apicv(vcpu);
}
#else
void switch_apicv_mode_x2apic(struct vcpu *vcpu)
void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu)
{
uint32_t value32;
if(vcpu->vm->vm_desc->lapic_pt) {


@ -17,7 +17,7 @@ static int efi_initialized;
void efi_spurious_handler(int vector)
{
struct vcpu* vcpu;
struct acrn_vcpu* vcpu;
int ret;
if (get_cpu_id() != 0)
@ -39,7 +39,7 @@ void efi_spurious_handler(int vector)
int uefi_sw_loader(struct vm *vm)
{
int ret = 0;
struct vcpu *vcpu = get_primary_vcpu(vm);
struct acrn_vcpu *vcpu = get_primary_vcpu(vm);
struct acrn_vcpu_regs *vcpu_regs = &vm0_boot_context;
ASSERT(vm != NULL, "Incorrect argument");


@ -8,7 +8,7 @@
#include <schedule.h>
#include <softirq.h>
static void run_vcpu_pre_work(struct vcpu *vcpu)
static void run_vcpu_pre_work(struct acrn_vcpu *vcpu)
{
uint64_t *pending_pre_work = &vcpu->pending_pre_work;
@ -17,7 +17,7 @@ static void run_vcpu_pre_work(struct vcpu *vcpu)
}
}
void vcpu_thread(struct vcpu *vcpu)
void vcpu_thread(struct acrn_vcpu *vcpu)
{
#ifdef HV_DEBUG
uint64_t vmexit_begin = 0UL, vmexit_end = 0UL;


@ -30,7 +30,7 @@ bool is_hypercall_from_ring0(void)
*/
int32_t hcall_sos_offline_cpu(struct vm *vm, uint64_t lapicid)
{
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
int i;
pr_info("sos offline cpu with lapicid %lld", lapicid);
@ -198,7 +198,7 @@ int32_t hcall_set_vcpu_regs(struct vm *vm, uint16_t vmid, uint64_t param)
{
struct vm *target_vm = get_vm_from_vmid(vmid);
struct acrn_set_vcpu_regs vcpu_regs;
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
if ((target_vm == NULL) || (param == 0U) || is_vm0(target_vm)) {
return -1;
@ -326,7 +326,7 @@ int32_t hcall_set_ioreq_buffer(struct vm *vm, uint16_t vmid, uint64_t param)
int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id)
{
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
struct vm *target_vm = get_vm_from_vmid(vmid);
/* make sure we have set req_buf */


@ -16,7 +16,7 @@ static void fire_vhm_interrupt(void)
* otherwise, send IPI hardcoded to BOOT_CPU_ID
*/
struct vm *vm0;
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
vm0 = get_vm_from_vmid(0U);
@ -63,7 +63,7 @@ static void acrn_print_request(uint16_t vcpu_id, const struct vhm_request *req)
*
* @pre vcpu != NULL && io_req != NULL
*/
int32_t acrn_insert_request_wait(struct vcpu *vcpu, const struct io_request *io_req)
int32_t acrn_insert_request_wait(struct acrn_vcpu *vcpu, const struct io_request *io_req)
{
union vhm_request_buffer *req_buf = NULL;
struct vhm_request *vhm_req;


@ -60,7 +60,7 @@ void free_pcpu(uint16_t pcpu_id)
bitmap_clear_lock(pcpu_id, &pcpu_used_bitmap);
}
void add_vcpu_to_runqueue(struct vcpu *vcpu)
void add_vcpu_to_runqueue(struct acrn_vcpu *vcpu)
{
uint16_t pcpu_id = vcpu->pcpu_id;
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
@ -72,7 +72,7 @@ void add_vcpu_to_runqueue(struct vcpu *vcpu)
spinlock_release(&ctx->runqueue_lock);
}
void remove_vcpu_from_runqueue(struct vcpu *vcpu)
void remove_vcpu_from_runqueue(struct acrn_vcpu *vcpu)
{
uint16_t pcpu_id = vcpu->pcpu_id;
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
@ -82,21 +82,21 @@ void remove_vcpu_from_runqueue(struct vcpu *vcpu)
spinlock_release(&ctx->runqueue_lock);
}
static struct vcpu *select_next_vcpu(uint16_t pcpu_id)
static struct acrn_vcpu *select_next_vcpu(uint16_t pcpu_id)
{
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
struct vcpu *vcpu = NULL;
struct acrn_vcpu *vcpu = NULL;
spinlock_obtain(&ctx->runqueue_lock);
if (!list_empty(&ctx->runqueue)) {
vcpu = get_first_item(&ctx->runqueue, struct vcpu, run_list);
vcpu = get_first_item(&ctx->runqueue, struct acrn_vcpu, run_list);
}
spinlock_release(&ctx->runqueue_lock);
return vcpu;
}
void make_reschedule_request(const struct vcpu *vcpu)
void make_reschedule_request(const struct acrn_vcpu *vcpu)
{
struct sched_context *ctx = &per_cpu(sched_ctx, vcpu->pcpu_id);
@ -113,7 +113,7 @@ int need_reschedule(uint16_t pcpu_id)
return bitmap_test_and_clear_lock(NEED_RESCHEDULE, &ctx->flags);
}
static void context_switch_out(struct vcpu *vcpu)
static void context_switch_out(struct acrn_vcpu *vcpu)
{
/* if it's idle thread, no action for switch out */
if (vcpu == NULL) {
@ -131,7 +131,7 @@ static void context_switch_out(struct vcpu *vcpu)
*/
}
static void context_switch_in(struct vcpu *vcpu)
static void context_switch_in(struct acrn_vcpu *vcpu)
{
/* update current_vcpu */
get_cpu_var(sched_ctx).curr_vcpu = vcpu;
@ -184,7 +184,7 @@ void default_idle(void)
}
}
static void switch_to(struct vcpu *curr)
static void switch_to(struct acrn_vcpu *curr)
{
/*
* reset stack pointer here. Otherwise, schedule
@ -224,8 +224,8 @@ static void switch_to(struct vcpu *curr)
void schedule(void)
{
uint16_t pcpu_id = get_cpu_id();
struct vcpu *next = NULL;
struct vcpu *prev = per_cpu(sched_ctx, pcpu_id).curr_vcpu;
struct acrn_vcpu *next = NULL;
struct acrn_vcpu *prev = per_cpu(sched_ctx, pcpu_id).curr_vcpu;
get_schedule_lock(pcpu_id);
next = select_next_vcpu(pcpu_id);
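The scheduler hunks above lean on get_first_item() to recover the vcpu that embeds a given run_list node. A minimal sketch of the container_of-style arithmetic such a macro typically performs; this is an illustrative shape, not ACRN's exact definition:

#include <stddef.h>

struct list_head {
	struct list_head *prev, *next;
};

/* Recover the enclosing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* First entry of a non-empty list; caller holds the runqueue lock. */
#define get_first_item(head, type, member) \
	container_of((head)->next, type, member)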

View File

@ -12,7 +12,7 @@
/* this hcall can only come from a trusty-enabled vcpu itself, and cannot be
* called from other vcpus
*/
int32_t hcall_world_switch(struct vcpu *vcpu)
int32_t hcall_world_switch(struct acrn_vcpu *vcpu)
{
int32_t next_world_id = !(vcpu->arch_vcpu.cur_context);
@ -42,7 +42,7 @@ int32_t hcall_world_switch(struct vcpu *vcpu)
/* this hcall can only come from a trusty-enabled vcpu itself, and cannot be
* called from other vcpus
*/
int32_t hcall_initialize_trusty(struct vcpu *vcpu, uint64_t param)
int32_t hcall_initialize_trusty(struct acrn_vcpu *vcpu, uint64_t param)
{
if (vcpu->vm->sworld_control.flag.supported == 0UL) {
dev_dbg(ACRN_DBG_TRUSTY_HYCALL,
@ -72,7 +72,7 @@ int32_t hcall_initialize_trusty(struct vcpu *vcpu, uint64_t param)
return 0;
}
int64_t hcall_save_restore_sworld_ctx(struct vcpu *vcpu)
int64_t hcall_save_restore_sworld_ctx(struct acrn_vcpu *vcpu)
{
struct vm *vm = vcpu->vm;

View File

@ -111,7 +111,7 @@ int general_sw_loader(struct vm *vm)
struct zero_page *zeropage;
struct sw_linux *sw_linux = &(vm->sw.linux_info);
struct sw_kernel_info *sw_kernel = &(vm->sw.kernel_info);
struct vcpu *vcpu = get_primary_vcpu(vm);
struct acrn_vcpu *vcpu = get_primary_vcpu(vm);
pr_dbg("Loading guest to run-time location");

View File

@ -54,7 +54,7 @@ static const char *const excp_names[32] = {
*/
struct intr_excp_ctx *crash_ctx;
static void dump_guest_reg(struct vcpu *vcpu)
static void dump_guest_reg(struct acrn_vcpu *vcpu)
{
printf("\n\n================================================");
printf("================================\n\n");
@ -101,7 +101,7 @@ static void dump_guest_reg(struct vcpu *vcpu)
printf("\r\n");
}
static void dump_guest_stack(struct vcpu *vcpu)
static void dump_guest_stack(struct acrn_vcpu *vcpu)
{
uint32_t i;
uint64_t tmp[DUMP_STACK_SIZE], fault_addr;
@ -126,7 +126,7 @@ static void dump_guest_stack(struct vcpu *vcpu)
printf("\r\n");
}
static void show_guest_call_trace(struct vcpu *vcpu)
static void show_guest_call_trace(struct acrn_vcpu *vcpu)
{
uint64_t bp;
uint64_t count = 0UL;
@ -170,7 +170,7 @@ static void show_guest_call_trace(struct vcpu *vcpu)
static void dump_guest_context(uint16_t pcpu_id)
{
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
vcpu = per_cpu(vcpu, pcpu_id);
if (vcpu != NULL) {

View File

@ -842,7 +842,7 @@ int32_t profiling_msr_ops_all_cpus(struct vm *vm, uint64_t addr)
int32_t profiling_vm_list_info(struct vm *vm, uint64_t addr)
{
struct vm *tmp_vm;
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
int32_t vm_idx;
uint16_t i, j;
struct profiling_vm_info_list vm_info_list;
@ -1291,7 +1291,7 @@ void profiling_ipi_handler(__unused void *data)
/*
* Save the VCPU info on vmenter
*/
void profiling_vmenter_handler(__unused struct vcpu *vcpu)
void profiling_vmenter_handler(__unused struct acrn_vcpu *vcpu)
{
if (((get_cpu_var(profiling_info.sep_state).pmu_state == PMU_RUNNING) &&
((sep_collection_switch &
@ -1307,7 +1307,7 @@ void profiling_vmenter_handler(__unused struct vcpu *vcpu)
/*
* Save the VCPU info on vmexit
*/
void profiling_vmexit_handler(struct vcpu *vcpu, uint64_t exit_reason)
void profiling_vmexit_handler(struct acrn_vcpu *vcpu, uint64_t exit_reason)
{
per_cpu(profiling_info.sep_state, vcpu->pcpu_id).total_vmexit_count++;

View File

@ -569,7 +569,7 @@ static int shell_list_vcpu(__unused int argc, __unused char **argv)
{
char temp_str[MAX_STR_SIZE];
struct vm *vm;
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
char state[32];
uint16_t i;
uint16_t idx;
@ -624,7 +624,7 @@ static int shell_vcpu_dumpreg(int argc, char **argv)
uint16_t vm_id;
uint16_t vcpu_id;
struct vm *vm;
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
uint64_t mask = 0UL;
struct vcpu_dump dump;

View File

@ -183,7 +183,7 @@ vioapic_set_irq(struct vm *vm, uint32_t irq, uint32_t operation)
* configuration.
*/
void
vioapic_update_tmr(struct vcpu *vcpu)
vioapic_update_tmr(struct acrn_vcpu *vcpu)
{
struct acrn_vioapic *vioapic;
struct acrn_vlapic *vlapic;
@ -370,7 +370,7 @@ vioapic_indirect_write(struct acrn_vioapic *vioapic, uint32_t addr,
*/
if ((changed & NEED_TMR_UPDATE) != 0UL) {
uint16_t i;
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
dev_dbg(ACRN_DBG_IOAPIC,
"ioapic pin%hhu: recalculate vlapic trigger-mode reg",

View File

@ -189,7 +189,7 @@ static void vpic_notify_intr(struct acrn_vpic *vpic)
*/
i8259->intr_raised = true;
if (vpic->vm->wire_mode == VPIC_WIRE_INTR) {
struct vcpu *vcpu = vcpu_from_vid(vpic->vm, 0U);
struct acrn_vcpu *vcpu = vcpu_from_vid(vpic->vm, 0U);
vcpu_inject_extint(vcpu);
} else {
vlapic_set_local_intr(vpic->vm, BROADCAST_CPU_ID, APIC_LVT_LINT0);

View File

@ -132,7 +132,7 @@ static inline void cpuid_subleaf(uint32_t leaf, uint32_t subleaf,
}
int set_vcpuid_entries(struct vm *vm);
void guest_cpuid(struct vcpu *vcpu,
void guest_cpuid(struct acrn_vcpu *vcpu,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx);

View File

@ -112,9 +112,9 @@ enum vm_paging_mode {
*/
uint64_t vcpumask2pcpumask(struct vm *vm, uint64_t vdmask);
int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa, uint32_t *err_code);
int gva2gpa(struct acrn_vcpu *vcpu, uint64_t gva, uint64_t *gpa, uint32_t *err_code);
enum vm_paging_mode get_vcpu_paging_mode(struct vcpu *vcpu);
enum vm_paging_mode get_vcpu_paging_mode(struct acrn_vcpu *vcpu);
void init_e820(void);
void obtain_e820_mem_info(void);
@ -135,9 +135,9 @@ extern const struct e820_entry e820_default_entries[NUM_E820_ENTRIES];
extern uint32_t boot_regs[2];
extern struct e820_mem_params e820_mem;
int rdmsr_vmexit_handler(struct vcpu *vcpu);
int wrmsr_vmexit_handler(struct vcpu *vcpu);
void init_msr_emulation(struct vcpu *vcpu);
int rdmsr_vmexit_handler(struct acrn_vcpu *vcpu);
int wrmsr_vmexit_handler(struct acrn_vcpu *vcpu);
void init_msr_emulation(struct acrn_vcpu *vcpu);
struct run_context;
int vmx_vmrun(struct run_context *context, int ops, int ibrs);
@ -203,7 +203,7 @@ int copy_to_gpa(struct vm *vm, void *h_ptr, uint64_t gpa, uint32_t size);
* @param[out] err_code The page fault flags
* @param[out] fault_addr The GVA address that causes a page fault
*/
int copy_from_gva(struct vcpu *vcpu, void *h_ptr, uint64_t gva,
int copy_from_gva(struct acrn_vcpu *vcpu, void *h_ptr, uint64_t gva,
uint32_t size, uint32_t *err_code, uint64_t *fault_addr);
/**
* @brief Copy data from HV address space to VM GVA space
@ -218,7 +218,7 @@ int copy_from_gva(struct vcpu *vcpu, void *h_ptr, uint64_t gva,
* @param[out] err_code The page fault flags
* @param[out] fault_addr The GVA address that causes a page fault
*/
int copy_to_gva(struct vcpu *vcpu, void *h_ptr, uint64_t gva,
int copy_to_gva(struct acrn_vcpu *vcpu, void *h_ptr, uint64_t gva,
uint32_t size, uint32_t *err_code, uint64_t *fault_addr);
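A hedged sketch of the typical caller pattern for these copy routines: on a failed copy, the reported fault address and error code are reflected back into the guest as a page fault. fetch_guest_buf() is a hypothetical helper; vcpu_inject_pf() is declared in irq.h further below.

static int fetch_guest_buf(struct acrn_vcpu *vcpu, void *dst,
		uint64_t gva, uint32_t len)
{
	uint32_t err_code = 0U;
	uint64_t fault_addr = 0UL;
	int ret = copy_from_gva(vcpu, dst, gva, len, &err_code, &fault_addr);

	if (ret < 0) {
		/* reflect the page fault back into the guest */
		vcpu_inject_pf(vcpu, fault_addr, err_code);
	}
	return ret;
}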
extern struct acrn_vcpu_regs vm0_boot_context;
/**

View File

@ -20,7 +20,7 @@ struct ucode_header {
uint32_t reserved[3];
};
void acrn_update_ucode(struct vcpu *vcpu, uint64_t v);
void acrn_update_ucode(struct acrn_vcpu *vcpu, uint64_t v);
uint64_t get_microcode_version(void);
#endif /* UCODE_H */

View File

@ -220,7 +220,7 @@ struct vcpu_arch {
} __aligned(CPU_PAGE_SIZE);
struct vm;
struct vcpu {
struct acrn_vcpu {
/* Architecture specific definitions for this VCPU */
struct vcpu_arch arch_vcpu;
uint16_t pcpu_id; /* Physical CPU ID of this VCPU */
@ -259,24 +259,24 @@ struct vcpu {
} __aligned(CPU_PAGE_SIZE);
struct vcpu_dump {
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
char *str;
uint32_t str_max;
};
static inline bool is_vcpu_bsp(const struct vcpu *vcpu)
static inline bool is_vcpu_bsp(const struct acrn_vcpu *vcpu)
{
return (vcpu->vcpu_id == BOOT_CPU_ID);
}
/* do not update Guest RIP for next VM Enter */
static inline void vcpu_retain_rip(struct vcpu *vcpu)
static inline void vcpu_retain_rip(struct acrn_vcpu *vcpu)
{
(vcpu)->arch_vcpu.inst_len = 0U;
}
static inline struct acrn_vlapic *
vcpu_vlapic(struct vcpu *vcpu)
vcpu_vlapic(struct acrn_vcpu *vcpu)
{
return &(vcpu->arch_vcpu.vlapic);
}
@ -293,7 +293,7 @@ vcpu_vlapic(struct vcpu *vcpu)
*
* @return the value of the register.
*/
uint64_t vcpu_get_gpreg(const struct vcpu *vcpu, uint32_t reg);
uint64_t vcpu_get_gpreg(const struct acrn_vcpu *vcpu, uint32_t reg);
/**
* @brief set vcpu register value
@ -304,7 +304,7 @@ uint64_t vcpu_get_gpreg(const struct vcpu *vcpu, uint32_t reg);
* @param[in] reg register of the vcpu
* @param[in] val the value to set the register of the vcpu to
*/
void vcpu_set_gpreg(struct vcpu *vcpu, uint32_t reg, uint64_t val);
void vcpu_set_gpreg(struct acrn_vcpu *vcpu, uint32_t reg, uint64_t val);
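A hedged usage sketch of this accessor pair, modeled on a cpuid-style emulation path; guest_cpuid() is declared in cpuid.h above, while the CPU_REG_* indices stand in for ACRN's register enum and are assumptions here.

static void emulate_cpuid_regs(struct acrn_vcpu *vcpu)
{
	uint32_t eax = (uint32_t)vcpu_get_gpreg(vcpu, CPU_REG_RAX);
	uint32_t ecx = (uint32_t)vcpu_get_gpreg(vcpu, CPU_REG_RCX);
	uint32_t ebx = 0U, edx = 0U;

	/* guest_cpuid() consumes eax/ecx as leaf/subleaf and fills all four */
	guest_cpuid(vcpu, &eax, &ebx, &ecx, &edx);

	vcpu_set_gpreg(vcpu, CPU_REG_RAX, eax);
	vcpu_set_gpreg(vcpu, CPU_REG_RBX, ebx);
	vcpu_set_gpreg(vcpu, CPU_REG_RCX, ecx);
	vcpu_set_gpreg(vcpu, CPU_REG_RDX, edx);
}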
/**
* @brief get vcpu RIP value
@ -315,7 +315,7 @@ void vcpu_set_gpreg(struct vcpu *vcpu, uint32_t reg, uint64_t val);
*
* @return the value of RIP.
*/
uint64_t vcpu_get_rip(struct vcpu *vcpu);
uint64_t vcpu_get_rip(struct acrn_vcpu *vcpu);
/**
* @brief set vcpu RIP value
@ -325,7 +325,7 @@ uint64_t vcpu_get_rip(struct vcpu *vcpu);
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] val the value to set RIP
*/
void vcpu_set_rip(struct vcpu *vcpu, uint64_t val);
void vcpu_set_rip(struct acrn_vcpu *vcpu, uint64_t val);
/**
* @brief get vcpu RSP value
@ -336,7 +336,7 @@ void vcpu_set_rip(struct vcpu *vcpu, uint64_t val);
*
* @return the value of RSP.
*/
uint64_t vcpu_get_rsp(struct vcpu *vcpu);
uint64_t vcpu_get_rsp(struct acrn_vcpu *vcpu);
/**
* @brief set vcpu RSP value
@ -346,7 +346,7 @@ uint64_t vcpu_get_rsp(struct vcpu *vcpu);
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] val the value to set RSP
*/
void vcpu_set_rsp(struct vcpu *vcpu, uint64_t val);
void vcpu_set_rsp(struct acrn_vcpu *vcpu, uint64_t val);
/**
* @brief get vcpu EFER value
@ -357,7 +357,7 @@ void vcpu_set_rsp(struct vcpu *vcpu, uint64_t val);
*
* @return the value of EFER.
*/
uint64_t vcpu_get_efer(struct vcpu *vcpu);
uint64_t vcpu_get_efer(struct acrn_vcpu *vcpu);
/**
* @brief set vcpu EFER value
@ -367,7 +367,7 @@ uint64_t vcpu_get_efer(struct vcpu *vcpu);
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] val the value to set EFER
*/
void vcpu_set_efer(struct vcpu *vcpu, uint64_t val);
void vcpu_set_efer(struct acrn_vcpu *vcpu, uint64_t val);
/**
* @brief get vcpu RFLAG value
@ -378,7 +378,7 @@ void vcpu_set_efer(struct vcpu *vcpu, uint64_t val);
*
* @return the value of RFLAGS.
*/
uint64_t vcpu_get_rflags(struct vcpu *vcpu);
uint64_t vcpu_get_rflags(struct acrn_vcpu *vcpu);
/**
* @brief set vcpu RFLAGS value
@ -388,7 +388,7 @@ uint64_t vcpu_get_rflags(struct vcpu *vcpu);
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] val the value to set RFLAGS
*/
void vcpu_set_rflags(struct vcpu *vcpu, uint64_t val);
void vcpu_set_rflags(struct acrn_vcpu *vcpu, uint64_t val);
/**
* @brief get vcpu CR0 value
@ -399,7 +399,7 @@ void vcpu_set_rflags(struct vcpu *vcpu, uint64_t val);
*
* @return the value of CR0.
*/
uint64_t vcpu_get_cr0(struct vcpu *vcpu);
uint64_t vcpu_get_cr0(struct acrn_vcpu *vcpu);
/**
* @brief set vcpu CR0 value
@ -409,7 +409,7 @@ uint64_t vcpu_get_cr0(struct vcpu *vcpu);
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] val the value to set CR0
*/
void vcpu_set_cr0(struct vcpu *vcpu, uint64_t val);
void vcpu_set_cr0(struct acrn_vcpu *vcpu, uint64_t val);
/**
* @brief get vcpu CR2 value
@ -420,7 +420,7 @@ void vcpu_set_cr0(struct vcpu *vcpu, uint64_t val);
*
* @return the value of CR2.
*/
uint64_t vcpu_get_cr2(struct vcpu *vcpu);
uint64_t vcpu_get_cr2(struct acrn_vcpu *vcpu);
/**
* @brief set vcpu CR2 value
@ -430,7 +430,7 @@ uint64_t vcpu_get_cr2(struct vcpu *vcpu);
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] val the value to set CR2
*/
void vcpu_set_cr2(struct vcpu *vcpu, uint64_t val);
void vcpu_set_cr2(struct acrn_vcpu *vcpu, uint64_t val);
/**
* @brief get vcpu CR4 value
@ -441,7 +441,7 @@ void vcpu_set_cr2(struct vcpu *vcpu, uint64_t val);
*
* @return the value of CR4.
*/
uint64_t vcpu_get_cr4(struct vcpu *vcpu);
uint64_t vcpu_get_cr4(struct acrn_vcpu *vcpu);
/**
* @brief set vcpu CR4 value
@ -451,10 +451,10 @@ uint64_t vcpu_get_cr4(struct vcpu *vcpu);
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] val the value to set CR4
*/
void vcpu_set_cr4(struct vcpu *vcpu, uint64_t val);
void vcpu_set_cr4(struct acrn_vcpu *vcpu, uint64_t val);
uint64_t vcpu_get_pat_ext(const struct vcpu *vcpu);
void vcpu_set_pat_ext(struct vcpu *vcpu, uint64_t val);
uint64_t vcpu_get_pat_ext(const struct acrn_vcpu *vcpu);
void vcpu_set_pat_ext(struct acrn_vcpu *vcpu, uint64_t val);
/**
* @brief set all the vcpu registers
@ -464,7 +464,7 @@ void vcpu_set_pat_ext(struct vcpu *vcpu, uint64_t val);
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] vcpu_regs all the registers' value
*/
void set_vcpu_regs(struct vcpu *vcpu, struct acrn_vcpu_regs *vcpu_regs);
void set_vcpu_regs(struct acrn_vcpu *vcpu, struct acrn_vcpu_regs *vcpu_regs);
/**
* @brief reset all the vcpu registers
@ -473,7 +473,7 @@ void set_vcpu_regs(struct vcpu *vcpu, struct acrn_vcpu_regs *vcpu_regs);
*
* @param[inout] vcpu pointer to vcpu data structure
*/
void reset_vcpu_regs(struct vcpu *vcpu);
void reset_vcpu_regs(struct acrn_vcpu *vcpu);
/**
* @brief set the vcpu AP entry
@ -483,24 +483,24 @@ void reset_vcpu_regs(struct vcpu *vcpu);
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] entry the entry value for AP
*/
void set_ap_entry(struct vcpu *vcpu, uint64_t entry);
void set_ap_entry(struct acrn_vcpu *vcpu, uint64_t entry);
static inline bool is_long_mode(struct vcpu *vcpu)
static inline bool is_long_mode(struct acrn_vcpu *vcpu)
{
return (vcpu_get_efer(vcpu) & MSR_IA32_EFER_LMA_BIT) != 0UL;
}
static inline bool is_paging_enabled(struct vcpu *vcpu)
static inline bool is_paging_enabled(struct acrn_vcpu *vcpu)
{
return (vcpu_get_cr0(vcpu) & CR0_PG) != 0UL;
}
static inline bool is_pae(struct vcpu *vcpu)
static inline bool is_pae(struct acrn_vcpu *vcpu)
{
return (vcpu_get_cr4(vcpu) & CR4_PAE) != 0UL;
}
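These three predicates compose into a paging-mode classification; a sketch of that composition follows. The PAGING_MODE_* names are assumptions standing in for the enum vm_paging_mode values in guest.h.

static enum vm_paging_mode probe_paging_mode(struct acrn_vcpu *vcpu)
{
	if (!is_paging_enabled(vcpu)) {
		return PAGING_MODE_0_LEVEL;	/* paging disabled */
	}
	if (!is_pae(vcpu)) {
		return PAGING_MODE_2_LEVEL;	/* 32-bit, non-PAE */
	}
	if (!is_long_mode(vcpu)) {
		return PAGING_MODE_3_LEVEL;	/* PAE */
	}
	return PAGING_MODE_4_LEVEL;		/* IA-32e */
}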
struct vcpu* get_ever_run_vcpu(uint16_t pcpu_id);
struct acrn_vcpu* get_ever_run_vcpu(uint16_t pcpu_id);
/**
* @brief create a vcpu for the target vm
@ -514,7 +514,7 @@ struct vcpu* get_ever_run_vcpu(uint16_t pcpu_id);
*
* @return 0: vcpu created successfully, other values failed.
*/
int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle);
int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct acrn_vcpu **rtn_vcpu_handle);
/**
* @brief run into non-root mode based on vcpu setting
@ -527,9 +527,9 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle);
*
* @return 0: vcpu run successfully, other values failed.
*/
int run_vcpu(struct vcpu *vcpu);
int run_vcpu(struct acrn_vcpu *vcpu);
int shutdown_vcpu(struct vcpu *vcpu);
int shutdown_vcpu(struct acrn_vcpu *vcpu);
/**
* @brief unmap the vcpu with pcpu and free its vlapic
@ -539,7 +539,7 @@ int shutdown_vcpu(struct vcpu *vcpu);
* @param[inout] vcpu pointer to vcpu data structure
* @pre vcpu != NULL
*/
void offline_vcpu(struct vcpu *vcpu);
void offline_vcpu(struct acrn_vcpu *vcpu);
/**
* @brief reset vcpu state and values
@ -548,7 +548,7 @@ void offline_vcpu(struct vcpu *vcpu);
*
* @param[inout] vcpu pointer to vcpu data structure
*/
void reset_vcpu(struct vcpu *vcpu);
void reset_vcpu(struct acrn_vcpu *vcpu);
/**
* @brief pause the vcpu and set new state
@ -558,7 +558,7 @@ void reset_vcpu(struct vcpu *vcpu);
* @param[inout] vcpu pointer to vcpu data structure
* @param[in] new_state the state to set vcpu
*/
void pause_vcpu(struct vcpu *vcpu, enum vcpu_state new_state);
void pause_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state);
/**
* @brief resume the vcpu
@ -567,7 +567,7 @@ void pause_vcpu(struct vcpu *vcpu, enum vcpu_state new_state);
*
* @param[inout] vcpu pointer to vcpu data structure
*/
void resume_vcpu(struct vcpu *vcpu);
void resume_vcpu(struct acrn_vcpu *vcpu);
/**
* @brief set the vcpu to running state, then it will be scheduled.
@ -576,7 +576,7 @@ void resume_vcpu(struct vcpu *vcpu);
*
* @param[inout] vcpu pointer to vcpu data structure
*/
void schedule_vcpu(struct vcpu *vcpu);
void schedule_vcpu(struct acrn_vcpu *vcpu);
/**
* @brief create a vcpu for the vm and map it to the pcpu.
@ -588,7 +588,7 @@ void schedule_vcpu(struct vcpu *vcpu);
*/
int prepare_vcpu(struct vm *vm, uint16_t pcpu_id);
void request_vcpu_pre_work(struct vcpu *vcpu, uint16_t pre_work_id);
void request_vcpu_pre_work(struct acrn_vcpu *vcpu, uint16_t pre_work_id);
void vcpu_dumpreg(void *data);

View File

@ -96,7 +96,7 @@ void vioapic_set_irq(struct vm *vm, uint32_t irq, uint32_t operation);
* @return void
*/
void vioapic_set_irq_nolock(struct vm *vm, uint32_t irq, uint32_t operation);
void vioapic_update_tmr(struct vcpu *vcpu);
void vioapic_update_tmr(struct acrn_vcpu *vcpu);
uint32_t vioapic_pincount(const struct vm *vm);
void vioapic_process_eoi(struct vm *vm, uint32_t vector);

View File

@ -70,7 +70,7 @@ struct acrn_vlapic {
struct vlapic_pir_desc pir_desc;
struct vm *vm;
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
uint32_t esr_pending;
int esr_firing;
@ -177,16 +177,16 @@ void vlapic_post_intr(uint16_t dest_pcpu_id);
*
* @pre vcpu != NULL
*/
uint64_t apicv_get_pir_desc_paddr(struct vcpu *vcpu);
uint64_t apicv_get_pir_desc_paddr(struct acrn_vcpu *vcpu);
int vlapic_rdmsr(struct vcpu *vcpu, uint32_t msr, uint64_t *rval);
int vlapic_wrmsr(struct vcpu *vcpu, uint32_t msr, uint64_t wval);
int vlapic_rdmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t *rval);
int vlapic_wrmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t wval);
/*
* Signals to the LAPIC that an interrupt at 'vector' needs to be generated
* to the 'cpu'; the state is recorded in the IRR.
*/
int vlapic_set_intr(struct vcpu *vcpu, uint32_t vector, bool level);
int vlapic_set_intr(struct acrn_vcpu *vcpu, uint32_t vector, bool level);
#define LAPIC_TRIG_LEVEL true
#define LAPIC_TRIG_EDGE false
@ -200,7 +200,7 @@ int vlapic_set_intr(struct vcpu *vcpu, uint32_t vector, bool level);
* @return -EINVAL on error that vector is invalid or vcpu is NULL.
*/
static inline int
vlapic_intr_level(struct vcpu *vcpu, uint32_t vector)
vlapic_intr_level(struct acrn_vcpu *vcpu, uint32_t vector)
{
return vlapic_set_intr(vcpu, vector, LAPIC_TRIG_LEVEL);
}
@ -215,7 +215,7 @@ vlapic_intr_level(struct vcpu *vcpu, uint32_t vector)
* @return -EINVAL on error that vector is invalid or vcpu is NULL.
*/
static inline int
vlapic_intr_edge(struct vcpu *vcpu, uint32_t vector)
vlapic_intr_edge(struct acrn_vcpu *vcpu, uint32_t vector)
{
return vlapic_set_intr(vcpu, vector, LAPIC_TRIG_EDGE);
}
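Illustrative use of the edge-triggered wrapper, e.g. from a virtual timer path; kick_guest_timer() and the vector choice are hypothetical, while pr_err() and vcpu->vcpu_id come from the codebase.

static void kick_guest_timer(struct acrn_vcpu *vcpu, uint32_t vector)
{
	if (vlapic_intr_edge(vcpu, vector) != 0) {
		pr_err("failed to inject vector %u to vcpu%hu",
			vector, vcpu->vcpu_id);
	}
}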
@ -265,11 +265,11 @@ void vlapic_set_tmr_one_vec(struct acrn_vlapic *vlapic, uint32_t delmode,
void vlapic_apicv_batch_set_tmr(struct acrn_vlapic *vlapic);
uint32_t vlapic_get_apicid(struct acrn_vlapic *vlapic);
int vlapic_create(struct vcpu *vcpu);
int vlapic_create(struct acrn_vcpu *vcpu);
/*
* @pre vcpu != NULL
*/
void vlapic_free(struct vcpu *vcpu);
void vlapic_free(struct acrn_vcpu *vcpu);
void vlapic_init(struct acrn_vlapic *vlapic);
void vlapic_reset(struct acrn_vlapic *vlapic);
void vlapic_restore(struct acrn_vlapic *vlapic, const struct lapic_regs *regs);
@ -277,10 +277,10 @@ bool vlapic_enabled(const struct acrn_vlapic *vlapic);
uint64_t vlapic_apicv_get_apic_access_addr(void);
uint64_t vlapic_apicv_get_apic_page_addr(struct acrn_vlapic *vlapic);
void vlapic_apicv_inject_pir(struct acrn_vlapic *vlapic);
int apic_access_vmexit_handler(struct vcpu *vcpu);
int apic_write_vmexit_handler(struct vcpu *vcpu);
int veoi_vmexit_handler(struct vcpu *vcpu);
int tpr_below_threshold_vmexit_handler(__unused struct vcpu *vcpu);
int apic_access_vmexit_handler(struct acrn_vcpu *vcpu);
int apic_write_vmexit_handler(struct acrn_vcpu *vcpu);
int veoi_vmexit_handler(struct acrn_vcpu *vcpu);
int tpr_below_threshold_vmexit_handler(__unused struct acrn_vcpu *vcpu);
void calcvdest(struct vm *vm, uint64_t *dmask, uint32_t dest, bool phys);
/**

View File

@ -23,7 +23,7 @@ enum vm_privilege_level {
struct vm_hw_info {
/* vcpu array of this VM */
struct vcpu vcpu_array[CONFIG_MAX_VCPUS_PER_VM];
struct acrn_vcpu vcpu_array[CONFIG_MAX_VCPUS_PER_VM];
uint16_t created_vcpus; /* Number of created vcpus */
uint64_t gpa_lowtop; /* top lowmem gpa of this VM */
} __aligned(CPU_PAGE_SIZE);
@ -205,10 +205,10 @@ static inline bool is_vm0(const struct vm *vm)
/*
* @pre vcpu_id < CONFIG_MAX_VCPUS_PER_VM
*/
static inline struct vcpu *vcpu_from_vid(struct vm *vm, uint16_t vcpu_id)
static inline struct acrn_vcpu *vcpu_from_vid(struct vm *vm, uint16_t vcpu_id)
{
uint16_t i;
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
foreach_vcpu(i, vm, vcpu) {
if (vcpu->vcpu_id == vcpu_id) {
@ -218,10 +218,10 @@ static inline struct vcpu *vcpu_from_vid(struct vm *vm, uint16_t vcpu_id)
return vcpu;
}
static inline struct vcpu *vcpu_from_pid(struct vm *vm, uint16_t pcpu_id)
static inline struct acrn_vcpu *vcpu_from_pid(struct vm *vm, uint16_t pcpu_id)
{
uint16_t i;
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
foreach_vcpu(i, vm, vcpu) {
if (vcpu->pcpu_id == pcpu_id) {
@ -232,10 +232,10 @@ static inline struct vcpu *vcpu_from_pid(struct vm *vm, uint16_t pcpu_id)
return NULL;
}
static inline struct vcpu *get_primary_vcpu(struct vm *vm)
static inline struct acrn_vcpu *get_primary_vcpu(struct vm *vm)
{
uint16_t i;
struct vcpu *vcpu;
struct acrn_vcpu *vcpu;
foreach_vcpu(i, vm, vcpu) {
if (is_vcpu_bsp(vcpu)) {
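The helpers above iterate with foreach_vcpu(); a sketch of the shape such an iterator macro commonly takes over vcpu_array and created_vcpus. This expansion is an assumption — the real macro in vm.h may add state checks.

#define foreach_vcpu(idx, vm, vcpu)				\
	for ((idx) = 0U, (vcpu) = &((vm)->hw.vcpu_array[0]);	\
		(idx) < (vm)->hw.created_vcpus;			\
		(idx)++, (vcpu) = &((vm)->hw.vcpu_array[(idx)]))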

View File

@ -49,7 +49,7 @@ struct vm_io_range {
struct vm_io_handler;
struct vm;
struct vcpu;
struct acrn_vcpu;
typedef
uint32_t (*io_read_fn_t)(struct vm *vm, uint16_t port, size_t size);
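A minimal handler conforming to io_read_fn_t, sketched to show the expected shape; returning all-ones for an unbacked port mirrors common PIO-emulation practice, and the function itself is hypothetical.

static uint32_t dummy_port_read(struct vm *vm, uint16_t port, size_t size)
{
	(void)vm;
	(void)port;
	/* all-ones of the requested access width (1, 2 or 4 bytes) */
	return (size >= 4U) ? 0xFFFFFFFFU : ((1U << (size * 8U)) - 1U);
}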
@ -178,7 +178,7 @@ struct mem_io_node {
*
* @param vcpu The virtual CPU which triggers the VM exit on I/O instruction
*/
int32_t pio_instr_vmexit_handler(struct vcpu *vcpu);
int32_t pio_instr_vmexit_handler(struct acrn_vcpu *vcpu);
/**
* @brief Initialize the I/O bitmap for \p vm
@ -259,7 +259,7 @@ void unregister_mmio_emulation_handler(struct vm *vm, uint64_t start,
* either a previous call to emulate_io() returning 0 or the corresponding VHM
* request transferring to the COMPLETE state.
*/
void emulate_mmio_post(const struct vcpu *vcpu, const struct io_request *io_req);
void emulate_mmio_post(const struct acrn_vcpu *vcpu, const struct io_request *io_req);
/**
* @brief Post-work of VHM requests for MMIO emulation
@ -271,7 +271,7 @@ void emulate_mmio_post(const struct vcpu *vcpu, const struct io_request *io_req)
* @remark This function must be called after the VHM request corresponding to
* \p vcpu has been transferred to the COMPLETE state.
*/
void dm_emulate_mmio_post(struct vcpu *vcpu);
void dm_emulate_mmio_post(struct acrn_vcpu *vcpu);
/**
* @brief Emulate \p io_req for \p vcpu
@ -288,14 +288,14 @@ void dm_emulate_mmio_post(struct vcpu *vcpu);
* @return -EINVAL - \p io_req has an invalid type.
* @return Negative on other errors during emulation.
*/
int32_t emulate_io(struct vcpu *vcpu, struct io_request *io_req);
int32_t emulate_io(struct acrn_vcpu *vcpu, struct io_request *io_req);
/**
* @brief General post-work for all kinds of VHM requests for I/O emulation
*
* @param vcpu The virtual CPU that triggers the MMIO access
*/
void emulate_io_post(struct vcpu *vcpu);
void emulate_io_post(struct acrn_vcpu *vcpu);
/**
* @brief Deliver \p io_req to SOS and suspend \p vcpu till its completion
@ -305,7 +305,7 @@ void emulate_io_post(struct vcpu *vcpu);
*
* @pre vcpu != NULL && io_req != NULL
*/
int32_t acrn_insert_request_wait(struct vcpu *vcpu, const struct io_request *io_req);
int32_t acrn_insert_request_wait(struct acrn_vcpu *vcpu, const struct io_request *io_req);
/**
* @}

View File

@ -141,7 +141,7 @@ uint32_t irq_to_vector(uint32_t irq);
*
* @pre vcpu != NULL
*/
int vcpu_queue_exception(struct vcpu *vcpu, uint32_t vector, uint32_t err_code);
int vcpu_queue_exception(struct acrn_vcpu *vcpu, uint32_t vector, uint32_t err_code);
/**
* @brief Inject external interrupt to guest.
@ -152,7 +152,7 @@ int vcpu_queue_exception(struct vcpu *vcpu, uint32_t vector, uint32_t err_code);
*
* @pre vcpu != NULL
*/
void vcpu_inject_extint(struct vcpu *vcpu);
void vcpu_inject_extint(struct acrn_vcpu *vcpu);
/**
* @brief Inject NMI to guest.
@ -163,7 +163,7 @@ void vcpu_inject_extint(struct vcpu *vcpu);
*
* @pre vcpu != NULL
*/
void vcpu_inject_nmi(struct vcpu *vcpu);
void vcpu_inject_nmi(struct acrn_vcpu *vcpu);
/**
* @brief Inject general protection exception (GP) to guest.
@ -175,7 +175,7 @@ void vcpu_inject_nmi(struct vcpu *vcpu);
*
* @pre vcpu != NULL
*/
void vcpu_inject_gp(struct vcpu *vcpu, uint32_t err_code);
void vcpu_inject_gp(struct acrn_vcpu *vcpu, uint32_t err_code);
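A hedged sketch of the canonical use: MSR emulation injecting #GP(0) on an access the hypervisor does not virtualize. msr_is_emulated() and do_wrmsr() are assumed helpers, not ACRN functions.

static int emulate_wrmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t v)
{
	if (!msr_is_emulated(msr)) {
		vcpu_inject_gp(vcpu, 0U);	/* #GP(0), per SDM for bad MSRs */
		return 0;
	}
	return do_wrmsr(vcpu, msr, v);
}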
/**
* @brief Inject page fault exception (PF) to guest.
@ -188,7 +188,7 @@ void vcpu_inject_gp(struct vcpu *vcpu, uint32_t err_code);
*
* @pre vcpu != NULL
*/
void vcpu_inject_pf(struct vcpu *vcpu, uint64_t addr, uint32_t err_code);
void vcpu_inject_pf(struct acrn_vcpu *vcpu, uint64_t addr, uint32_t err_code);
/**
* @brief Inject invalid opcode exception (UD) to guest.
@ -199,7 +199,7 @@ void vcpu_inject_pf(struct vcpu *vcpu, uint64_t addr, uint32_t err_code);
*
* @pre vcpu != NULL
*/
void vcpu_inject_ud(struct vcpu *vcpu);
void vcpu_inject_ud(struct acrn_vcpu *vcpu);
/**
* @brief Inject alignment check exception (AC) to guest.
@ -210,7 +210,7 @@ void vcpu_inject_ud(struct vcpu *vcpu);
*
* @pre vcpu != NULL
*/
void vcpu_inject_ac(struct vcpu *vcpu);
void vcpu_inject_ac(struct acrn_vcpu *vcpu);
/**
* @brief Inject stack fault exception (SS) to guest.
@ -221,16 +221,16 @@ void vcpu_inject_ac(struct vcpu *vcpu);
*
* @pre vcpu != NULL
*/
void vcpu_inject_ss(struct vcpu *vcpu);
void vcpu_make_request(struct vcpu *vcpu, uint16_t eventid);
void vcpu_inject_ss(struct acrn_vcpu *vcpu);
void vcpu_make_request(struct acrn_vcpu *vcpu, uint16_t eventid);
/*
* @pre vcpu != NULL
*/
int exception_vmexit_handler(struct vcpu *vcpu);
int interrupt_window_vmexit_handler(struct vcpu *vcpu);
int external_interrupt_vmexit_handler(struct vcpu *vcpu);
int acrn_handle_pending_request(struct vcpu *vcpu);
int exception_vmexit_handler(struct acrn_vcpu *vcpu);
int interrupt_window_vmexit_handler(struct acrn_vcpu *vcpu);
int external_interrupt_vmexit_handler(struct acrn_vcpu *vcpu);
int acrn_handle_pending_request(struct acrn_vcpu *vcpu);
/**
* @brief Initialize the interrupt
@ -241,7 +241,7 @@ int acrn_handle_pending_request(struct vcpu *vcpu);
*/
void interrupt_init(uint16_t pcpu_id);
void cancel_event_injection(struct vcpu *vcpu);
void cancel_event_injection(struct acrn_vcpu *vcpu);
#ifdef HV_DEBUG
/**

View File

@ -149,7 +149,7 @@ void flush_vpid_global(void);
*
* @return None
*/
void invept(const struct vcpu *vcpu);
void invept(const struct acrn_vcpu *vcpu);
/**
* @brief Host-physical address continuous checking
*
@ -302,7 +302,7 @@ void ept_mr_del(struct vm *vm, uint64_t *pml4_page, uint64_t gpa,
* @return -EINVAL - fail to handle the EPT violation
* @return 0 - Success to handle the EPT violation
*/
int ept_violation_vmexit_handler(struct vcpu *vcpu);
int ept_violation_vmexit_handler(struct acrn_vcpu *vcpu);
/**
* @brief EPT misconfiguration handling
*
@ -311,7 +311,7 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu);
* @return -EINVAL - fail to handle the EPT misconfig
* @return 0 - Success to handle the EPT misconfig
*/
int ept_misconfig_vmexit_handler(__unused struct vcpu *vcpu);
int ept_misconfig_vmexit_handler(__unused struct acrn_vcpu *vcpu);
/**
* @}

View File

@ -62,7 +62,7 @@ struct mtrr_state {
*
* @return None
*/
void mtrr_wrmsr(struct vcpu *vcpu, uint32_t msr, uint64_t value);
void mtrr_wrmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t value);
/**
* @brief Virtual MTRR MSR read
*
@ -71,7 +71,7 @@ void mtrr_wrmsr(struct vcpu *vcpu, uint32_t msr, uint64_t value);
*
* @return unsigned long integer - The specified virtual MTRR MSR value
*/
uint64_t mtrr_rdmsr(const struct vcpu *vcpu, uint32_t msr);
uint64_t mtrr_rdmsr(const struct acrn_vcpu *vcpu, uint32_t msr);
/**
* @brief Virtual MTRR initialization
*
@ -79,7 +79,7 @@ uint64_t mtrr_rdmsr(const struct vcpu *vcpu, uint32_t msr);
*
* @return None
*/
void init_mtrr(struct vcpu *vcpu);
void init_mtrr(struct acrn_vcpu *vcpu);
/**
* @}
*/

View File

@ -128,11 +128,11 @@ struct trusty_startup_param {
uint8_t padding[4];
};
void switch_world(struct vcpu *vcpu, int next_world);
bool initialize_trusty(struct vcpu *vcpu, uint64_t param);
void switch_world(struct acrn_vcpu *vcpu, int next_world);
bool initialize_trusty(struct acrn_vcpu *vcpu, uint64_t param);
void destroy_secure_world(struct vm *vm, bool need_clr_mem);
void save_sworld_context(struct vcpu *vcpu);
void restore_sworld_context(struct vcpu *vcpu);
void save_sworld_context(struct acrn_vcpu *vcpu);
void restore_sworld_context(struct acrn_vcpu *vcpu);
void trusty_set_dseed(const void *dseed, uint8_t dseed_num);
#endif /* TRUSTY_H_ */

View File

@ -8,14 +8,14 @@
#define VMEXIT_H_
struct vm_exit_dispatch {
int (*handler)(struct vcpu *);
int (*handler)(struct acrn_vcpu *);
uint32_t need_exit_qualification;
};
int vmexit_handler(struct vcpu *vcpu);
int vmcall_vmexit_handler(struct vcpu *vcpu);
int cpuid_vmexit_handler(struct vcpu *vcpu);
int cr_access_vmexit_handler(struct vcpu *vcpu);
int vmexit_handler(struct acrn_vcpu *vcpu);
int vmcall_vmexit_handler(struct acrn_vcpu *vcpu);
int cpuid_vmexit_handler(struct acrn_vcpu *vcpu);
int cr_access_vmexit_handler(struct acrn_vcpu *vcpu);
extern void vm_exit(void);
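A hedged sketch of how a vm_exit_dispatch table is typically consumed: indexed by the VM-exit basic reason, with the handler invoked through the function pointer. The table contents and dispatch_vmexit() are illustrative; 10 and 28 are the architectural CPUID and CR-access exit reasons.

static const struct vm_exit_dispatch dispatch_table[] = {
	[10U] = { .handler = cpuid_vmexit_handler,     .need_exit_qualification = 0U },
	[28U] = { .handler = cr_access_vmexit_handler, .need_exit_qualification = 1U },
	/* ... one entry per VM-exit basic reason ... */
};

static int dispatch_vmexit(struct acrn_vcpu *vcpu, uint32_t basic_reason)
{
	return dispatch_table[basic_reason].handler(vcpu);
}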
static inline uint64_t
vm_exit_qualification_bit_mask(uint64_t exit_qual, uint32_t msb, uint32_t lsb)

View File

@ -451,22 +451,22 @@ void exec_vmwrite32(uint32_t field, uint32_t value);
void exec_vmwrite64(uint32_t field_full, uint64_t value);
#define exec_vmwrite exec_vmwrite64
void init_vmcs(struct vcpu *vcpu);
void init_vmcs(struct acrn_vcpu *vcpu);
void vmx_off(uint16_t pcpu_id);
void exec_vmclear(void *addr);
void exec_vmptrld(void *addr);
uint64_t vmx_rdmsr_pat(const struct vcpu *vcpu);
int vmx_wrmsr_pat(struct vcpu *vcpu, uint64_t value);
uint64_t vmx_rdmsr_pat(const struct acrn_vcpu *vcpu);
int vmx_wrmsr_pat(struct acrn_vcpu *vcpu, uint64_t value);
void vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0);
void vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4);
void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t cr0);
void vmx_write_cr4(struct acrn_vcpu *vcpu, uint64_t cr4);
bool is_vmx_disabled(void);
void switch_apicv_mode_x2apic(struct vcpu *vcpu);
void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu);
static inline enum vm_cpu_mode get_vcpu_mode(const struct vcpu *vcpu)
static inline enum vm_cpu_mode get_vcpu_mode(const struct acrn_vcpu *vcpu)
{
return vcpu->arch_vcpu.cpu_mode;
}

View File

@ -405,7 +405,7 @@ int32_t hcall_vm_intr_monitor(struct vm *vm, uint16_t vmid, uint64_t param);
* @return 0 on success, non-zero on error.
*/
int32_t hcall_world_switch(struct vcpu *vcpu);
int32_t hcall_world_switch(struct acrn_vcpu *vcpu);
/**
* @brief Initialize environment for Trusty-OS on a vCPU.
@ -421,7 +421,7 @@ int32_t hcall_world_switch(struct vcpu *vcpu);
*
* @return 0 on success, non-zero on error.
*/
int32_t hcall_initialize_trusty(struct vcpu *vcpu, uint64_t param);
int32_t hcall_initialize_trusty(struct acrn_vcpu *vcpu, uint64_t param);
/**
* @brief Save/Restore Context of Secure World.
@ -430,7 +430,7 @@ int32_t hcall_initialize_trusty(struct vcpu *vcpu, uint64_t param);
*
* @return 0 on success, non-zero on error.
*/
int64_t hcall_save_restore_sworld_ctx(struct vcpu *vcpu);
int64_t hcall_save_restore_sworld_ctx(struct acrn_vcpu *vcpu);
/**
* @}

View File

@ -14,7 +14,7 @@ struct sched_context {
spinlock_t runqueue_lock;
struct list_head runqueue;
uint64_t flags;
struct vcpu *curr_vcpu;
struct acrn_vcpu *curr_vcpu;
spinlock_t scheduler_lock;
};
@ -26,18 +26,18 @@ void set_pcpu_used(uint16_t pcpu_id);
uint16_t allocate_pcpu(void);
void free_pcpu(uint16_t pcpu_id);
void add_vcpu_to_runqueue(struct vcpu *vcpu);
void remove_vcpu_from_runqueue(struct vcpu *vcpu);
void add_vcpu_to_runqueue(struct acrn_vcpu *vcpu);
void remove_vcpu_from_runqueue(struct acrn_vcpu *vcpu);
void default_idle(void);
void make_reschedule_request(const struct vcpu *vcpu);
void make_reschedule_request(const struct acrn_vcpu *vcpu);
int need_reschedule(uint16_t pcpu_id);
void make_pcpu_offline(uint16_t pcpu_id);
int need_offline(uint16_t pcpu_id);
void schedule(void);
void vcpu_thread(struct vcpu *vcpu);
void vcpu_thread(struct acrn_vcpu *vcpu);
#endif /* SCHEDULE_H */
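A hedged sketch of the wake-up path these primitives compose into: queue the vcpu on its pcpu's runqueue, then flag that pcpu for reschedule. wake_vcpu() is a hypothetical wrapper around the two declared functions.

static void wake_vcpu(struct acrn_vcpu *vcpu)
{
	add_vcpu_to_runqueue(vcpu);
	make_reschedule_request(vcpu);
}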

View File

@ -11,14 +11,14 @@
#include <profiling_internal.h>
void profiling_vmenter_handler(struct vcpu *vcpu);
void profiling_vmexit_handler(struct vcpu *vcpu, uint64_t exit_reason);
void profiling_vmenter_handler(struct acrn_vcpu *vcpu);
void profiling_vmexit_handler(struct acrn_vcpu *vcpu, uint64_t exit_reason);
void profiling_setup(void);
#else
static inline void profiling_vmenter_handler(__unused struct vcpu *vcpu) {}
static inline void profiling_vmexit_handler(__unused struct vcpu *vcpu,
static inline void profiling_vmenter_handler(__unused struct acrn_vcpu *vcpu) {}
static inline void profiling_vmexit_handler(__unused struct acrn_vcpu *vcpu,
__unused uint64_t exit_reason) {}
static inline void profiling_setup(void) {}