HV:treewide:rename vcpu data structure

For the data structure type "struct vcpu", the type name is identical to
variable names used in the same scope. This is a MISRA C violation.

Naming convention rule: if a data structure type is used by multiple
modules, its corresponding logical resource is exposed to external
components (such as SOS or UOS), and its name is overly generic (such as
vcpu or vm), the name needs the prefix "acrn_".

The following update is made throughout the tree:

    struct vcpu *vcpu  -->  struct acrn_vcpu *vcpu

Tracked-On: #861
Signed-off-by: Xiangyang Wu <xiangyang.wu@linux.intel.com>
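Why the rename is needed: C keeps struct tags and ordinary identifiers in separate namespaces, so `struct vcpu *vcpu` is legal C, but MISRA C's identifier-uniqueness rules flag the tag/variable collision. Below is a minimal sketch of the before/after pattern; the `vcpu_id` member and the `demo()` function are illustrative only, not taken from the ACRN sources:

```c
#include <stdint.h>

/* Before the commit: the struct tag and the local variable share the
 * name "vcpu".  Legal C (tags live in their own namespace), but this
 * is exactly the identifier reuse that MISRA C flags, and it is easy
 * to misread in large scopes.
 */
struct vcpu {
	uint16_t vcpu_id;	/* illustrative member, not from ACRN */
};

/* After the commit: the externally visible, generically named type
 * carries the "acrn_" prefix, so the tag no longer collides with the
 * conventional variable name used everywhere in the tree.
 */
struct acrn_vcpu {
	uint16_t vcpu_id;	/* illustrative member, not from ACRN */
};

uint16_t demo(void)
{
	struct vcpu vcpu = { 0U };		/* old pattern: tag == variable name */
	struct acrn_vcpu acrn = { 0U };
	struct acrn_vcpu *vcpu_new = &acrn;	/* new pattern: names stay distinct */

	return (uint16_t)(vcpu.vcpu_id + vcpu_new->vcpu_id);
}
```

With the type renamed, the diff below is purely mechanical: every declaration, cast, and `sizeof` that named `struct vcpu` becomes `struct acrn_vcpu`, while variable names such as `vcpu`, `target_vcpu`, and `primary` are left untouched.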
@@ -442,7 +442,7 @@ static void ptdev_intr_handle_irq(struct vm *vm,
 
 void ptdev_softirq(uint16_t pcpu_id)
 {
-	struct vcpu *vcpu = (struct vcpu *)per_cpu(vcpu, pcpu_id);
+	struct acrn_vcpu *vcpu = (struct acrn_vcpu *)per_cpu(vcpu, pcpu_id);
 	struct vm *vm = vcpu->vm;
 
 	while (1) {
@@ -6,7 +6,7 @@
 
 #include <hypervisor.h>
 
-static inline struct vcpuid_entry *find_vcpuid_entry(const struct vcpu *vcpu,
+static inline struct vcpuid_entry *find_vcpuid_entry(const struct acrn_vcpu *vcpu,
 	uint32_t leaf_arg, uint32_t subleaf)
 {
 	uint32_t i = 0U, nr, half;
@@ -293,7 +293,7 @@ int set_vcpuid_entries(struct vm *vm)
 	return 0;
 }
 
-void guest_cpuid(struct vcpu *vcpu,
+void guest_cpuid(struct acrn_vcpu *vcpu,
 		uint32_t *eax, uint32_t *ebx,
 		uint32_t *ecx, uint32_t *edx)
 {
@@ -28,7 +28,7 @@ uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
 	uint64_t hpa = INVALID_HPA;
 	uint64_t *pgentry, pg_size = 0UL;
 	void *eptp;
-	struct vcpu *vcpu = vcpu_from_pid(vm, get_cpu_id());
+	struct acrn_vcpu *vcpu = vcpu_from_pid(vm, get_cpu_id());
 
 	if ((vcpu != NULL) && (vcpu->arch_vcpu.cur_context == SECURE_WORLD)) {
 		eptp = vm->arch_vm.sworld_eptp;
@@ -71,7 +71,7 @@ uint64_t vm0_hpa2gpa(uint64_t hpa)
 	return hpa;
 }
 
-int ept_violation_vmexit_handler(struct vcpu *vcpu)
+int ept_violation_vmexit_handler(struct acrn_vcpu *vcpu)
 {
 	int status = -EINVAL, ret;
 	uint64_t exit_qual;
@@ -158,7 +158,7 @@ out:
 	return status;
 }
 
-int ept_misconfig_vmexit_handler(__unused struct vcpu *vcpu)
+int ept_misconfig_vmexit_handler(__unused struct acrn_vcpu *vcpu)
 {
 	int status;
 
@@ -182,7 +182,7 @@ void ept_mr_add(struct vm *vm, uint64_t *pml4_page,
 	uint64_t hpa, uint64_t gpa, uint64_t size, uint64_t prot_orig)
 {
 	uint16_t i;
-	struct vcpu *vcpu;
+	struct acrn_vcpu *vcpu;
 	uint64_t prot = prot_orig;
 
 	dev_dbg(ACRN_DBG_EPT, "%s, vm[%d] hpa: 0x%016llx gpa: 0x%016llx size: 0x%016llx prot: 0x%016x\n",
@@ -207,7 +207,7 @@ void ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
 	uint64_t gpa, uint64_t size,
 	uint64_t prot_set, uint64_t prot_clr)
 {
-	struct vcpu *vcpu;
+	struct acrn_vcpu *vcpu;
 	uint16_t i;
 
 	dev_dbg(ACRN_DBG_EPT, "%s,vm[%d] gpa 0x%llx size 0x%llx\n", __func__, vm->vm_id, gpa, size);
@@ -223,7 +223,7 @@ void ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
  */
 void ept_mr_del(struct vm *vm, uint64_t *pml4_page, uint64_t gpa, uint64_t size)
 {
-	struct vcpu *vcpu;
+	struct acrn_vcpu *vcpu;
 	uint16_t i;
 
 	dev_dbg(ACRN_DBG_EPT, "%s,vm[%d] gpa 0x%llx size 0x%llx\n", __func__, vm->vm_id, gpa, size);
@@ -36,7 +36,7 @@ uint64_t vcpumask2pcpumask(struct vm *vm, uint64_t vdmask)
 {
 	uint16_t vcpu_id;
 	uint64_t dmask = 0UL;
-	struct vcpu *vcpu;
+	struct acrn_vcpu *vcpu;
 
 	for (vcpu_id = 0U; vcpu_id < vm->hw.created_vcpus; vcpu_id++) {
 		if (vdmask & (1U << vcpu_id)) {
@@ -48,7 +48,7 @@ uint64_t vcpumask2pcpumask(struct vm *vm, uint64_t vdmask)
 	return dmask;
 }
 
-enum vm_paging_mode get_vcpu_paging_mode(struct vcpu *vcpu)
+enum vm_paging_mode get_vcpu_paging_mode(struct acrn_vcpu *vcpu)
 {
 	enum vm_cpu_mode cpu_mode;
 
@@ -72,7 +72,7 @@ enum vm_paging_mode get_vcpu_paging_mode(struct vcpu *vcpu)
 
 /* TODO: Add code to check for Revserved bits, SMAP and PKE when do translation
  * during page walk */
-static int local_gva2gpa_common(struct vcpu *vcpu, const struct page_walk_info *pw_info,
+static int local_gva2gpa_common(struct acrn_vcpu *vcpu, const struct page_walk_info *pw_info,
 	uint64_t gva, uint64_t *gpa, uint32_t *err_code)
 {
 	uint32_t i;
@@ -219,7 +219,7 @@ out:
 	return ret;
 }
 
-static int local_gva2gpa_pae(struct vcpu *vcpu, struct page_walk_info *pw_info,
+static int local_gva2gpa_pae(struct acrn_vcpu *vcpu, struct page_walk_info *pw_info,
 	uint64_t gva, uint64_t *gpa, uint32_t *err_code)
 {
 	int index;
@@ -269,7 +269,7 @@ out:
  * - Return -EFAULT for paging fault, and refer to err_code for paging fault
  *   error code.
  */
-int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa,
+int gva2gpa(struct acrn_vcpu *vcpu, uint64_t gva, uint64_t *gpa,
 	uint32_t *err_code)
 {
 	enum vm_paging_mode pm = get_vcpu_paging_mode(vcpu);
@@ -385,7 +385,7 @@ static inline int copy_gpa(struct vm *vm, void *h_ptr_arg, uint64_t gpa_arg,
 /*
  * @pre vcpu != NULL && err_code != NULL
 */
-static inline int copy_gva(struct vcpu *vcpu, void *h_ptr_arg, uint64_t gva_arg,
+static inline int copy_gva(struct acrn_vcpu *vcpu, void *h_ptr_arg, uint64_t gva_arg,
 	uint32_t size_arg, uint32_t *err_code, uint64_t *fault_addr,
 	bool cp_from_vm)
 {
@@ -443,13 +443,13 @@ int copy_to_gpa(struct vm *vm, void *h_ptr, uint64_t gpa, uint32_t size)
 	return copy_gpa(vm, h_ptr, gpa, size, 0);
 }
 
-int copy_from_gva(struct vcpu *vcpu, void *h_ptr, uint64_t gva,
+int copy_from_gva(struct acrn_vcpu *vcpu, void *h_ptr, uint64_t gva,
 	uint32_t size, uint32_t *err_code, uint64_t *fault_addr)
 {
 	return copy_gva(vcpu, h_ptr, gva, size, err_code, fault_addr, 1);
 }
 
-int copy_to_gva(struct vcpu *vcpu, void *h_ptr, uint64_t gva,
+int copy_to_gva(struct acrn_vcpu *vcpu, void *h_ptr, uint64_t gva,
 	uint32_t size, uint32_t *err_code, uint64_t *fault_addr)
 {
 	return copy_gva(vcpu, h_ptr, gva, size, err_code, fault_addr, 0);
@@ -322,7 +322,7 @@ static uint32_t get_vmcs_field(enum cpu_reg_name ident)
  * @pre ((reg <= CPU_REG_LAST) && (reg >= CPU_REG_FIRST))
  * @pre ((reg != CPU_REG_CR2) && (reg != CPU_REG_IDTR) && (reg != CPU_REG_GDTR))
 */
-static uint64_t vm_get_register(const struct vcpu *vcpu, enum cpu_reg_name reg)
+static uint64_t vm_get_register(const struct acrn_vcpu *vcpu, enum cpu_reg_name reg)
 {
 	uint64_t reg_val = 0UL;
 
@@ -349,7 +349,7 @@ static uint64_t vm_get_register(const struct vcpu *vcpu, enum cpu_reg_name reg)
  * @pre ((reg <= CPU_REG_LAST) && (reg >= CPU_REG_FIRST))
  * @pre ((reg != CPU_REG_CR2) && (reg != CPU_REG_IDTR) && (reg != CPU_REG_GDTR))
 */
-static void vm_set_register(struct vcpu *vcpu, enum cpu_reg_name reg,
+static void vm_set_register(struct acrn_vcpu *vcpu, enum cpu_reg_name reg,
 	uint64_t val)
 {
 
@@ -387,7 +387,7 @@ static void vm_get_seg_desc(enum cpu_reg_name seg, struct seg_desc *desc)
 	desc->access = exec_vmread32(tdesc.access_field);
 }
 
-static void get_guest_paging_info(struct vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt,
+static void get_guest_paging_info(struct acrn_vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt,
 	uint32_t csar)
 {
 	uint8_t cpl;
@@ -492,7 +492,7 @@ static int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
 	return 0;
 }
 
-static int mmio_read(const struct vcpu *vcpu, uint64_t *rval)
+static int mmio_read(const struct acrn_vcpu *vcpu, uint64_t *rval)
 {
 	if (vcpu == NULL) {
 		return -EINVAL;
@@ -502,7 +502,7 @@ static int mmio_read(const struct vcpu *vcpu, uint64_t *rval)
 	return 0;
 }
 
-static int mmio_write(struct vcpu *vcpu, uint64_t wval)
+static int mmio_write(struct acrn_vcpu *vcpu, uint64_t wval)
 {
 	if (vcpu == NULL) {
 		return -EINVAL;
@@ -538,7 +538,7 @@ static void vie_calc_bytereg(const struct instr_emul_vie *vie,
 	}
 }
 
-static uint8_t vie_read_bytereg(const struct vcpu *vcpu, const struct instr_emul_vie *vie)
+static uint8_t vie_read_bytereg(const struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
 {
 	int lhbr;
 	uint64_t val;
@@ -561,7 +561,7 @@ static uint8_t vie_read_bytereg(const struct vcpu *vcpu, const struct instr_emul
 	return reg_val;
 }
 
-static void vie_write_bytereg(struct vcpu *vcpu, const struct instr_emul_vie *vie,
+static void vie_write_bytereg(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie,
 	uint8_t byte)
 {
 	uint64_t origval, val, mask;
@@ -591,7 +591,7 @@ static void vie_write_bytereg(struct vcpu *vcpu, const struct instr_emul_vie *vi
  * @pre ((reg <= CPU_REG_LAST) && (reg >= CPU_REG_FIRST))
  * @pre ((reg != CPU_REG_CR2) && (reg != CPU_REG_IDTR) && (reg != CPU_REG_GDTR))
 */
-static void vie_update_register(struct vcpu *vcpu, enum cpu_reg_name reg,
+static void vie_update_register(struct acrn_vcpu *vcpu, enum cpu_reg_name reg,
 	uint64_t val_arg, uint8_t size)
 {
 	uint64_t origval;
@@ -616,7 +616,7 @@ static void vie_update_register(struct vcpu *vcpu, enum cpu_reg_name reg,
 
 #define RFLAGS_STATUS_BITS (PSL_C | PSL_PF | PSL_AF | PSL_Z | PSL_N | PSL_V)
 
-static void vie_update_rflags(struct vcpu *vcpu, uint64_t rflags2, uint64_t psl)
+static void vie_update_rflags(struct acrn_vcpu *vcpu, uint64_t rflags2, uint64_t psl)
 {
 	uint8_t size;
 	uint64_t rflags;
@@ -664,7 +664,7 @@ static uint64_t getcc(uint8_t opsize, uint64_t x, uint64_t y)
 	}
 }
 
-static int emulate_mov(struct vcpu *vcpu, const struct instr_emul_vie *vie)
+static int emulate_mov(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
 {
 	int error;
 	uint8_t size;
@@ -780,7 +780,7 @@ static int emulate_mov(struct vcpu *vcpu, const struct instr_emul_vie *vie)
 	return error;
 }
 
-static int emulate_movx(struct vcpu *vcpu, const struct instr_emul_vie *vie)
+static int emulate_movx(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
 {
 	int error;
 	uint8_t size;
@@ -883,7 +883,7 @@ static int emulate_movx(struct vcpu *vcpu, const struct instr_emul_vie *vie)
 *
 * It's only used by MOVS/STO
 */
-static void get_gva_si_nocheck(const struct vcpu *vcpu, uint8_t addrsize,
+static void get_gva_si_nocheck(const struct acrn_vcpu *vcpu, uint8_t addrsize,
 	enum cpu_reg_name seg, uint64_t *gva)
 {
 	uint64_t val;
@@ -907,7 +907,7 @@ static void get_gva_si_nocheck(const struct vcpu *vcpu, uint8_t addrsize,
 *
 * It's only used by MOVS/STO
 */
-static int get_gva_di_check(struct vcpu *vcpu, struct instr_emul_vie *vie,
+static int get_gva_di_check(struct acrn_vcpu *vcpu, struct instr_emul_vie *vie,
 	uint8_t addrsize, uint64_t *gva)
 {
 	int ret;
@@ -980,7 +980,7 @@ exception_inject:
 * For MOVs instruction, we always check RDI during instruction decoding phase.
 * And access RSI without any check during instruction emulation phase.
 */
-static int emulate_movs(struct vcpu *vcpu, const struct instr_emul_vie *vie)
+static int emulate_movs(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
 {
 	uint64_t src_gva, gpa, val = 0UL;
 	uint64_t *dst_hva, *src_hva;
@@ -1067,7 +1067,7 @@ done:
 	return error;
 }
 
-static int emulate_stos(struct vcpu *vcpu, const struct instr_emul_vie *vie)
+static int emulate_stos(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
 {
 	int error, repeat;
 	uint8_t opsize = vie->opsize;
@@ -1121,7 +1121,7 @@ static int emulate_stos(struct vcpu *vcpu, const struct instr_emul_vie *vie)
 	return 0;
 }
 
-static int emulate_test(struct vcpu *vcpu, const struct instr_emul_vie *vie)
+static int emulate_test(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
 {
 	int error;
 	uint8_t size;
@@ -1187,7 +1187,7 @@ static int emulate_test(struct vcpu *vcpu, const struct instr_emul_vie *vie)
 	return error;
 }
 
-static int emulate_and(struct vcpu *vcpu, const struct instr_emul_vie *vie)
+static int emulate_and(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
 {
 	int error;
 	uint8_t size;
@@ -1275,7 +1275,7 @@ static int emulate_and(struct vcpu *vcpu, const struct instr_emul_vie *vie)
 	return error;
 }
 
-static int emulate_or(struct vcpu *vcpu, const struct instr_emul_vie *vie)
+static int emulate_or(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
 {
 	int error;
 	uint8_t size;
@@ -1366,7 +1366,7 @@ static int emulate_or(struct vcpu *vcpu, const struct instr_emul_vie *vie)
 	return error;
 }
 
-static int emulate_cmp(struct vcpu *vcpu, const struct instr_emul_vie *vie)
+static int emulate_cmp(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
 {
 	int error;
 	uint8_t size;
@@ -1458,7 +1458,7 @@ static int emulate_cmp(struct vcpu *vcpu, const struct instr_emul_vie *vie)
 	return error;
 }
 
-static int emulate_sub(struct vcpu *vcpu, const struct instr_emul_vie *vie)
+static int emulate_sub(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
 {
 	int error;
 	uint8_t size;
@@ -1512,7 +1512,7 @@ static int emulate_sub(struct vcpu *vcpu, const struct instr_emul_vie *vie)
 	return error;
 }
 
-static int emulate_group1(struct vcpu *vcpu, const struct instr_emul_vie *vie)
+static int emulate_group1(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
 {
 	int error;
 
@@ -1534,7 +1534,7 @@ static int emulate_group1(struct vcpu *vcpu, const struct instr_emul_vie *vie)
 	return error;
 }
 
-static int emulate_bittest(struct vcpu *vcpu, const struct instr_emul_vie *vie)
+static int emulate_bittest(struct acrn_vcpu *vcpu, const struct instr_emul_vie *vie)
 {
 	uint64_t val, rflags, bitmask;
 	int error;
@@ -1580,7 +1580,7 @@ static int emulate_bittest(struct vcpu *vcpu, const struct instr_emul_vie *vie)
 static int vmm_emulate_instruction(struct instr_emul_ctxt *ctxt)
 {
 	struct instr_emul_vie *vie = &ctxt->vie;
-	struct vcpu *vcpu = ctxt->vcpu;
+	struct acrn_vcpu *vcpu = ctxt->vcpu;
 	int error;
 
 	if (vie->decoded == 0U) {
@@ -1628,7 +1628,7 @@ static int vmm_emulate_instruction(struct instr_emul_ctxt *ctxt)
 	return error;
 }
 
-static int vie_init(struct instr_emul_vie *vie, struct vcpu *vcpu)
+static int vie_init(struct instr_emul_vie *vie, struct acrn_vcpu *vcpu)
 {
 	uint64_t guest_rip_gva = vcpu_get_rip(vcpu);
 	uint32_t inst_len = vcpu->arch_vcpu.inst_len;
@@ -2182,7 +2182,7 @@ static int local_decode_instruction(enum vm_cpu_mode cpu_mode,
 }
 
 /* for instruction MOVS/STO, check the gva gotten from DI/SI. */
-static int instr_check_di(struct vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt)
+static int instr_check_di(struct acrn_vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt)
 {
 	int ret;
 	struct instr_emul_vie *vie = &emul_ctxt->vie;
@@ -2197,7 +2197,7 @@ static int instr_check_di(struct vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt)
 	return 0;
 }
 
-static int instr_check_gva(struct vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt,
+static int instr_check_gva(struct acrn_vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt,
 	enum vm_cpu_mode cpu_mode)
 {
 	int ret;
@@ -2283,7 +2283,7 @@ static int instr_check_gva(struct vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt,
 	return 0;
 }
 
-int decode_instruction(struct vcpu *vcpu)
+int decode_instruction(struct acrn_vcpu *vcpu)
 {
 	struct instr_emul_ctxt *emul_ctxt;
 	uint32_t csar;
@@ -2343,7 +2343,7 @@ int decode_instruction(struct vcpu *vcpu)
 	return (int)(emul_ctxt->vie.opsize);
 }
 
-int emulate_instruction(const struct vcpu *vcpu)
+int emulate_instruction(const struct acrn_vcpu *vcpu)
 {
 	struct instr_emul_ctxt *ctxt = &per_cpu(g_inst_ctxt, vcpu->pcpu_id);
 
@@ -190,10 +190,10 @@ struct vm_guest_paging {
 struct instr_emul_ctxt {
 	struct instr_emul_vie vie;
 	struct vm_guest_paging paging;
-	struct vcpu *vcpu;
+	struct acrn_vcpu *vcpu;
 };
 
-int emulate_instruction(const struct vcpu *vcpu);
-int decode_instruction(struct vcpu *vcpu);
+int emulate_instruction(const struct acrn_vcpu *vcpu);
+int decode_instruction(struct acrn_vcpu *vcpu);
 
 #endif
@@ -32,7 +32,7 @@ static inline size_t get_ucode_data_size(const struct ucode_header *uhdr)
 	return ((uhdr->data_size != 0U) ? uhdr->data_size : 2000U);
 }
 
-void acrn_update_ucode(struct vcpu *vcpu, uint64_t v)
+void acrn_update_ucode(struct acrn_vcpu *vcpu, uint64_t v)
 {
 	uint64_t gva, fault_addr;
 	struct ucode_header uhdr;
@@ -10,7 +10,7 @@
 
 vm_sw_loader_t vm_sw_loader;
 
-inline uint64_t vcpu_get_gpreg(const struct vcpu *vcpu, uint32_t reg)
+inline uint64_t vcpu_get_gpreg(const struct acrn_vcpu *vcpu, uint32_t reg)
 {
 	const struct run_context *ctx =
 		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
@@ -18,7 +18,7 @@ inline uint64_t vcpu_get_gpreg(const struct vcpu *vcpu, uint32_t reg)
 	return ctx->guest_cpu_regs.longs[reg];
 }
 
-inline void vcpu_set_gpreg(struct vcpu *vcpu, uint32_t reg, uint64_t val)
+inline void vcpu_set_gpreg(struct acrn_vcpu *vcpu, uint32_t reg, uint64_t val)
 {
 	struct run_context *ctx =
 		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
@@ -26,7 +26,7 @@ inline void vcpu_set_gpreg(struct vcpu *vcpu, uint32_t reg, uint64_t val)
 	ctx->guest_cpu_regs.longs[reg] = val;
 }
 
-inline uint64_t vcpu_get_rip(struct vcpu *vcpu)
+inline uint64_t vcpu_get_rip(struct acrn_vcpu *vcpu)
 {
 	struct run_context *ctx =
 		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
@@ -37,13 +37,13 @@ inline uint64_t vcpu_get_rip(struct vcpu *vcpu)
 	return ctx->rip;
 }
 
-inline void vcpu_set_rip(struct vcpu *vcpu, uint64_t val)
+inline void vcpu_set_rip(struct acrn_vcpu *vcpu, uint64_t val)
 {
 	vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.rip = val;
 	bitmap_set_lock(CPU_REG_RIP, &vcpu->reg_updated);
 }
 
-inline uint64_t vcpu_get_rsp(struct vcpu *vcpu)
+inline uint64_t vcpu_get_rsp(struct acrn_vcpu *vcpu)
 {
 	struct run_context *ctx =
 		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
@@ -51,7 +51,7 @@ inline uint64_t vcpu_get_rsp(struct vcpu *vcpu)
 	return ctx->guest_cpu_regs.regs.rsp;
 }
 
-inline void vcpu_set_rsp(struct vcpu *vcpu, uint64_t val)
+inline void vcpu_set_rsp(struct acrn_vcpu *vcpu, uint64_t val)
 {
 	struct run_context *ctx =
 		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
@@ -60,7 +60,7 @@ inline void vcpu_set_rsp(struct vcpu *vcpu, uint64_t val)
 	bitmap_set_lock(CPU_REG_RSP, &vcpu->reg_updated);
 }
 
-inline uint64_t vcpu_get_efer(struct vcpu *vcpu)
+inline uint64_t vcpu_get_efer(struct acrn_vcpu *vcpu)
 {
 	struct run_context *ctx =
 		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
@@ -71,14 +71,14 @@ inline uint64_t vcpu_get_efer(struct vcpu *vcpu)
 	return ctx->ia32_efer;
 }
 
-inline void vcpu_set_efer(struct vcpu *vcpu, uint64_t val)
+inline void vcpu_set_efer(struct acrn_vcpu *vcpu, uint64_t val)
 {
 	vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.ia32_efer
 		= val;
 	bitmap_set_lock(CPU_REG_EFER, &vcpu->reg_updated);
 }
 
-inline uint64_t vcpu_get_rflags(struct vcpu *vcpu)
+inline uint64_t vcpu_get_rflags(struct acrn_vcpu *vcpu)
 {
 	struct run_context *ctx =
 		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
@@ -90,14 +90,14 @@ inline uint64_t vcpu_get_rflags(struct vcpu *vcpu)
 	return ctx->rflags;
 }
 
-inline void vcpu_set_rflags(struct vcpu *vcpu, uint64_t val)
+inline void vcpu_set_rflags(struct acrn_vcpu *vcpu, uint64_t val)
 {
 	vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.rflags =
 		val;
 	bitmap_set_lock(CPU_REG_RFLAGS, &vcpu->reg_updated);
 }
 
-inline uint64_t vcpu_get_cr0(struct vcpu *vcpu)
+inline uint64_t vcpu_get_cr0(struct acrn_vcpu *vcpu)
 {
 	uint64_t mask;
 	struct run_context *ctx =
@@ -111,23 +111,23 @@ inline uint64_t vcpu_get_cr0(struct vcpu *vcpu)
 	return ctx->cr0;
 }
 
-inline void vcpu_set_cr0(struct vcpu *vcpu, uint64_t val)
+inline void vcpu_set_cr0(struct acrn_vcpu *vcpu, uint64_t val)
 {
 	vmx_write_cr0(vcpu, val);
 }
 
-inline uint64_t vcpu_get_cr2(struct vcpu *vcpu)
+inline uint64_t vcpu_get_cr2(struct acrn_vcpu *vcpu)
 {
 	return vcpu->
 		arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.cr2;
 }
 
-inline void vcpu_set_cr2(struct vcpu *vcpu, uint64_t val)
+inline void vcpu_set_cr2(struct acrn_vcpu *vcpu, uint64_t val)
 {
 	vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.cr2 = val;
 }
 
-inline uint64_t vcpu_get_cr4(struct vcpu *vcpu)
+inline uint64_t vcpu_get_cr4(struct acrn_vcpu *vcpu)
 {
 	uint64_t mask;
 	struct run_context *ctx =
@@ -141,29 +141,29 @@ inline uint64_t vcpu_get_cr4(struct vcpu *vcpu)
 	return ctx->cr4;
 }
 
-inline void vcpu_set_cr4(struct vcpu *vcpu, uint64_t val)
+inline void vcpu_set_cr4(struct acrn_vcpu *vcpu, uint64_t val)
 {
 	vmx_write_cr4(vcpu, val);
 }
 
-inline uint64_t vcpu_get_pat_ext(const struct vcpu *vcpu)
+inline uint64_t vcpu_get_pat_ext(const struct acrn_vcpu *vcpu)
 {
 	return vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].
 		ext_ctx.ia32_pat;
 }
 
-inline void vcpu_set_pat_ext(struct vcpu *vcpu, uint64_t val)
+inline void vcpu_set_pat_ext(struct acrn_vcpu *vcpu, uint64_t val)
 {
 	vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ext_ctx.ia32_pat
 		= val;
 }
 
-struct vcpu *get_ever_run_vcpu(uint16_t pcpu_id)
+struct acrn_vcpu *get_ever_run_vcpu(uint16_t pcpu_id)
 {
 	return per_cpu(ever_run_vcpu, pcpu_id);
 }
 
-static void set_vcpu_mode(struct vcpu *vcpu, uint32_t cs_attr, uint64_t ia32_efer,
+static void set_vcpu_mode(struct acrn_vcpu *vcpu, uint32_t cs_attr, uint64_t ia32_efer,
 	uint64_t cr0)
 {
 	if (ia32_efer & MSR_IA32_EFER_LMA_BIT) {
@@ -178,7 +178,7 @@ static void set_vcpu_mode(struct vcpu *vcpu, uint32_t cs_attr, uint64_t ia32_efe
 	}
 }
 
-void set_vcpu_regs(struct vcpu *vcpu, struct acrn_vcpu_regs *vcpu_regs)
+void set_vcpu_regs(struct acrn_vcpu *vcpu, struct acrn_vcpu_regs *vcpu_regs)
 {
 	struct ext_context *ectx;
 	struct run_context *ctx;
@@ -280,12 +280,12 @@ static struct acrn_vcpu_regs realmode_init_regs = {
 	.cr4 = 0UL,
 };
 
-void reset_vcpu_regs(struct vcpu *vcpu)
+void reset_vcpu_regs(struct acrn_vcpu *vcpu)
 {
 	set_vcpu_regs(vcpu, &realmode_init_regs);
 }
 
-void set_ap_entry(struct vcpu *vcpu, uint64_t entry)
+void set_ap_entry(struct acrn_vcpu *vcpu, uint64_t entry)
 {
 	struct ext_context *ectx;
 
@@ -311,9 +311,9 @@ void set_ap_entry(struct vcpu *vcpu, uint64_t entry)
 * for physical CPU 1 : vcpu->pcpu_id = 1, vcpu->vcpu_id = 1, vmid = 1;
 *
 ***********************************************************************/
-int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
+int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct acrn_vcpu **rtn_vcpu_handle)
 {
-	struct vcpu *vcpu;
+	struct acrn_vcpu *vcpu;
 	uint16_t vcpu_id;
 
 	pr_info("Creating VCPU working on PCPU%hu", pcpu_id);
@@ -329,7 +329,7 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
 	}
 	/* Allocate memory for VCPU */
 	vcpu = &(vm->hw.vcpu_array[vcpu_id]);
-	(void)memset((void *)vcpu, 0U, sizeof(struct vcpu));
+	(void)memset((void *)vcpu, 0U, sizeof(struct acrn_vcpu));
 
 	/* Initialize CPU ID for this VCPU */
 	vcpu->vcpu_id = vcpu_id;
@@ -387,7 +387,7 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
 /*
 * @pre vcpu != NULL
 */
-int run_vcpu(struct vcpu *vcpu)
+int run_vcpu(struct acrn_vcpu *vcpu)
 {
 	uint32_t instlen, cs_attr;
 	uint64_t rip, ia32_efer, cr0;
@@ -487,7 +487,7 @@ int run_vcpu(struct vcpu *vcpu)
 	return status;
 }
 
-int shutdown_vcpu(__unused struct vcpu *vcpu)
+int shutdown_vcpu(__unused struct acrn_vcpu *vcpu)
 {
 	/* TODO : Implement VCPU shutdown sequence */
 
@@ -497,7 +497,7 @@ int shutdown_vcpu(__unused struct vcpu *vcpu)
 /*
 * @pre vcpu != NULL
 */
-void offline_vcpu(struct vcpu *vcpu)
+void offline_vcpu(struct acrn_vcpu *vcpu)
 {
 	vlapic_free(vcpu);
 	per_cpu(ever_run_vcpu, vcpu->pcpu_id) = NULL;
@@ -508,7 +508,7 @@ void offline_vcpu(struct vcpu *vcpu)
 /* NOTE:
 * vcpu should be paused before call this function.
 */
-void reset_vcpu(struct vcpu *vcpu)
+void reset_vcpu(struct acrn_vcpu *vcpu)
 {
 	int i;
 	struct acrn_vlapic *vlapic;
@@ -546,7 +546,7 @@ void reset_vcpu(struct vcpu *vcpu)
 	reset_vcpu_regs(vcpu);
 }
 
-void pause_vcpu(struct vcpu *vcpu, enum vcpu_state new_state)
+void pause_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state)
 {
 	uint16_t pcpu_id = get_cpu_id();
 
@@ -572,7 +572,7 @@ void pause_vcpu(struct vcpu *vcpu, enum vcpu_state new_state)
 	}
 }
 
-void resume_vcpu(struct vcpu *vcpu)
+void resume_vcpu(struct acrn_vcpu *vcpu)
 {
 	pr_dbg("vcpu%hu resumed", vcpu->vcpu_id);
 
@@ -586,7 +586,7 @@ void resume_vcpu(struct vcpu *vcpu)
 	release_schedule_lock(vcpu->pcpu_id);
 }
 
-void schedule_vcpu(struct vcpu *vcpu)
+void schedule_vcpu(struct acrn_vcpu *vcpu)
 {
 	vcpu->state = VCPU_RUNNING;
 	pr_dbg("vcpu%hu scheduled", vcpu->vcpu_id);
@@ -601,7 +601,7 @@ void schedule_vcpu(struct vcpu *vcpu)
 int prepare_vcpu(struct vm *vm, uint16_t pcpu_id)
 {
 	int ret = 0;
-	struct vcpu *vcpu = NULL;
+	struct acrn_vcpu *vcpu = NULL;
 
 	ret = create_vcpu(pcpu_id, vm, &vcpu);
 	if (ret != 0) {
@@ -619,7 +619,7 @@ int prepare_vcpu(struct vm *vm, uint16_t pcpu_id)
 	return ret;
 }
 
-void request_vcpu_pre_work(struct vcpu *vcpu, uint16_t pre_work_id)
+void request_vcpu_pre_work(struct acrn_vcpu *vcpu, uint16_t pre_work_id)
 {
 	bitmap_set_lock(pre_work_id, &vcpu->pending_pre_work);
 }
@@ -633,7 +633,7 @@ void vcpu_dumpreg(void *data)
 	uint64_t i, fault_addr, tmp[DUMPREG_SP_SIZE];
 	uint32_t err_code = 0;
 	struct vcpu_dump *dump = data;
-	struct vcpu *vcpu = dump->vcpu;
+	struct acrn_vcpu *vcpu = dump->vcpu;
 	char *str = dump->str;
 	size_t len, size = dump->str_max;
 
@@ -109,7 +109,7 @@ static inline bool is_x2apic_enabled(const struct acrn_vlapic *vlapic);
 static struct acrn_vlapic *
 vm_lapic_from_vcpu_id(struct vm *vm, uint16_t vcpu_id)
 {
-	struct vcpu *vcpu;
+	struct acrn_vcpu *vcpu;
 
 	vcpu = vcpu_from_vid(vm, vcpu_id);
 
@@ -119,7 +119,7 @@ vm_lapic_from_vcpu_id(struct vm *vm, uint16_t vcpu_id)
 static uint16_t vm_apicid2vcpu_id(struct vm *vm, uint8_t lapicid)
 {
 	uint16_t i;
-	struct vcpu *vcpu;
+	struct acrn_vcpu *vcpu;
 
 	foreach_vcpu(i, vm, vcpu) {
 		struct acrn_vlapic *vlapic = vcpu_vlapic(vcpu);
@@ -138,7 +138,7 @@ vm_active_cpus(const struct vm *vm)
 {
 	uint64_t dmask = 0UL;
 	uint16_t i;
-	const struct vcpu *vcpu;
+	const struct acrn_vcpu *vcpu;
 
 	foreach_vcpu(i, vm, vcpu) {
 		bitmap_set_lock(vcpu->vcpu_id, &dmask);
@@ -163,7 +163,7 @@ vlapic_get_apicid(struct acrn_vlapic *vlapic)
 static inline uint32_t
 vlapic_build_id(const struct acrn_vlapic *vlapic)
 {
-	const struct vcpu *vcpu = vlapic->vcpu;
+	const struct acrn_vcpu *vcpu = vlapic->vcpu;
 	uint8_t vlapic_id;
 	uint32_t lapic_regs_id;
 
@@ -560,7 +560,7 @@ void vlapic_post_intr(uint16_t dest_pcpu_id)
 *
 * @pre vcpu != NULL
 */
-uint64_t apicv_get_pir_desc_paddr(struct vcpu *vcpu)
+uint64_t apicv_get_pir_desc_paddr(struct acrn_vcpu *vcpu)
 {
 	struct acrn_vlapic *vlapic;
 
@@ -757,7 +757,7 @@ static void
 vlapic_fire_lvt(struct acrn_vlapic *vlapic, uint32_t lvt)
 {
 	uint32_t vec, mode;
-	struct vcpu *vcpu = vlapic->vcpu;
+	struct acrn_vcpu *vcpu = vlapic->vcpu;
 
 	if ((lvt & APIC_LVT_M) != 0U) {
 		return;
@@ -938,7 +938,7 @@ static int
 vlapic_trigger_lvt(struct acrn_vlapic *vlapic, uint32_t vector)
 {
 	uint32_t lvt;
-	struct vcpu *vcpu = vlapic->vcpu;
+	struct acrn_vcpu *vcpu = vlapic->vcpu;
 
 	if (vlapic_enabled(vlapic) == false) {
 		/*
@@ -1150,7 +1150,7 @@ vlapic_set_cr8(struct acrn_vlapic *vlapic, uint64_t val)
 	uint32_t tpr;
 
 	if ((val & ~0xfUL) != 0U) {
-		struct vcpu *vcpu = vlapic->vcpu;
+		struct acrn_vcpu *vcpu = vlapic->vcpu;
 		vcpu_inject_gp(vcpu, 0U);
 		return;
 	}
@@ -1170,7 +1170,7 @@ vlapic_get_cr8(const struct acrn_vlapic *vlapic)
 }
 
 static void
-vlapic_process_init_sipi(struct vcpu* target_vcpu, uint32_t mode,
+vlapic_process_init_sipi(struct acrn_vcpu* target_vcpu, uint32_t mode,
 	uint32_t icr_low, uint16_t vcpu_id)
 {
 	if (mode == APIC_DELMODE_INIT) {
@@ -1224,7 +1224,7 @@ vlapic_icrlo_write_handler(struct acrn_vlapic *vlapic)
 	uint32_t icr_low, icr_high, dest;
 	uint32_t vec, mode, shorthand;
 	struct lapic_regs *lapic;
-	struct vcpu *target_vcpu;
+	struct acrn_vcpu *target_vcpu;
 
 	lapic = &(vlapic->apic_page);
 	lapic->icr_lo.v &= ~APIC_DELSTAT_PEND;
@@ -1803,7 +1803,7 @@ vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest, bool phys,
 	bool lowprio;
 	uint16_t vcpu_id;
 	uint64_t dmask;
-	struct vcpu *target_vcpu;
+	struct acrn_vcpu *target_vcpu;
 
 	if ((delmode != IOAPIC_RTE_DELFIXED) &&
 		(delmode != IOAPIC_RTE_DELLOPRI) &&
@@ -1885,7 +1885,7 @@ vlapic_apicv_batch_set_tmr(struct acrn_vlapic *vlapic)
 void
 vlapic_reset_tmr(struct acrn_vlapic *vlapic)
 {
-	struct vcpu *vcpu = vlapic->vcpu;
+	struct acrn_vcpu *vcpu = vlapic->vcpu;
 	uint32_t vector;
 
 	dev_dbg(ACRN_DBG_LAPIC,
@@ -1927,7 +1927,7 @@ vlapic_set_tmr_one_vec(struct acrn_vlapic *vlapic, uint32_t delmode,
 }
 
 int
-vlapic_set_intr(struct vcpu *vcpu, uint32_t vector, bool level)
+vlapic_set_intr(struct acrn_vcpu *vcpu, uint32_t vector, bool level)
 {
 	struct acrn_vlapic *vlapic;
 
@@ -2052,7 +2052,7 @@ vlapic_intr_msi(struct vm *vm, uint64_t addr, uint64_t msg)
 /* interrupt context */
 static void vlapic_timer_expired(void *data)
 {
-	struct vcpu *vcpu = (struct vcpu *)data;
+	struct acrn_vcpu *vcpu = (struct acrn_vcpu *)data;
 	struct acrn_vlapic *vlapic;
 	struct lapic_regs *lapic;
 
@@ -2101,7 +2101,7 @@ vlapic_x2apic_pt_icr_access(struct vm *vm, uint64_t val)
 	uint32_t icr_low = val;
 	uint32_t mode = icr_low & APIC_DELMODE_MASK;
 	uint16_t vcpu_id;
-	struct vcpu *target_vcpu;
+	struct acrn_vcpu *target_vcpu;
 	bool phys;
 	uint32_t shorthand;
 
@@ -2135,7 +2135,7 @@ vlapic_x2apic_pt_icr_access(struct vm *vm, uint64_t val)
 }
 #endif
 
-static int vlapic_x2apic_access(struct vcpu *vcpu, uint32_t msr, bool write,
+static int vlapic_x2apic_access(struct acrn_vcpu *vcpu, uint32_t msr, bool write,
 	uint64_t *val)
 {
 	struct acrn_vlapic *vlapic;
@@ -2172,7 +2172,7 @@ static int vlapic_x2apic_access(struct vcpu *vcpu, uint32_t msr, bool write,
 }
 
 int
-vlapic_rdmsr(struct vcpu *vcpu, uint32_t msr, uint64_t *rval)
+vlapic_rdmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t *rval)
 {
 	int error = 0;
 	struct acrn_vlapic *vlapic;
@@ -2204,7 +2204,7 @@ vlapic_rdmsr(struct vcpu *vcpu, uint32_t msr, uint64_t *rval)
 }
 
 int
-vlapic_wrmsr(struct vcpu *vcpu, uint32_t msr, uint64_t wval)
+vlapic_wrmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t wval)
 {
 	int error = 0;
 	struct acrn_vlapic *vlapic;
@@ -2236,7 +2236,7 @@ vlapic_wrmsr(struct vcpu *vcpu, uint32_t msr, uint64_t wval)
 	return error;
 }
 
-int vlapic_create(struct vcpu *vcpu)
+int vlapic_create(struct acrn_vcpu *vcpu)
 {
 	vcpu->arch_vcpu.vlapic.vm = vcpu->vm;
 	vcpu->arch_vcpu.vlapic.vcpu = vcpu;
@@ -2263,7 +2263,7 @@ int vlapic_create(struct vcpu *vcpu)
 /*
 * @pre vcpu != NULL
 */
-void vlapic_free(struct vcpu *vcpu)
+void vlapic_free(struct acrn_vcpu *vcpu)
 {
 	struct acrn_vlapic *vlapic = NULL;
 
@@ -2442,7 +2442,7 @@ vlapic_apicv_inject_pir(struct acrn_vlapic *vlapic)
 	}
 }
 
-int apic_access_vmexit_handler(struct vcpu *vcpu)
+int apic_access_vmexit_handler(struct acrn_vcpu *vcpu)
 {
 	int err = 0;
 	uint32_t offset = 0U;
@@ -2487,7 +2487,7 @@ int apic_access_vmexit_handler(struct vcpu *vcpu)
 	return err;
 }
 
-int veoi_vmexit_handler(struct vcpu *vcpu)
+int veoi_vmexit_handler(struct acrn_vcpu *vcpu)
 {
 	struct acrn_vlapic *vlapic = NULL;
 
@@ -2520,7 +2520,7 @@ static void vlapic_x2apic_self_ipi_handler(struct acrn_vlapic *vlapic)
 {
 	struct lapic_regs *lapic;
 	uint32_t vector;
-	struct vcpu *target_vcpu;
+	struct acrn_vcpu *target_vcpu;
 
 	lapic = &(vlapic->apic_page);
 	vector = lapic->self_ipi.v & 0xFFU;
@@ -2528,7 +2528,7 @@ static void vlapic_x2apic_self_ipi_handler(struct acrn_vlapic *vlapic)
 	vlapic_set_intr(target_vcpu, vector, LAPIC_TRIG_EDGE);
 }
 
-int apic_write_vmexit_handler(struct vcpu *vcpu)
+int apic_write_vmexit_handler(struct acrn_vcpu *vcpu)
 {
 	uint64_t qual;
 	int error, handled;
@@ -2598,7 +2598,7 @@ int apic_write_vmexit_handler(struct vcpu *vcpu)
 	return handled;
 }
 
-int tpr_below_threshold_vmexit_handler(__unused struct vcpu *vcpu)
+int tpr_below_threshold_vmexit_handler(__unused struct acrn_vcpu *vcpu)
 {
 	pr_err("Unhandled %s.", __func__);
 	return 0;
@@ -189,7 +189,7 @@ int shutdown_vm(struct vm *vm)
 {
 	int status = 0;
 	uint16_t i;
-	struct vcpu *vcpu = NULL;
+	struct acrn_vcpu *vcpu = NULL;
 
 	pause_vm(vm);
 
@@ -235,7 +235,7 @@ int shutdown_vm(struct vm *vm)
 */
 int start_vm(struct vm *vm)
 {
-	struct vcpu *vcpu = NULL;
+	struct acrn_vcpu *vcpu = NULL;
 
 	vm->state = VM_STARTED;
 
@@ -252,7 +252,7 @@ int start_vm(struct vm *vm)
 int reset_vm(struct vm *vm)
 {
 	int i;
-	struct vcpu *vcpu = NULL;
+	struct acrn_vcpu *vcpu = NULL;
 
 	if (vm->state != VM_PAUSED) {
 		return -1;
@@ -279,7 +279,7 @@ int reset_vm(struct vm *vm)
 void pause_vm(struct vm *vm)
 {
 	uint16_t i;
-	struct vcpu *vcpu = NULL;
+	struct acrn_vcpu *vcpu = NULL;
 
 	if (vm->state == VM_PAUSED) {
 		return;
@@ -298,7 +298,7 @@ void pause_vm(struct vm *vm)
 void resume_vm(struct vm *vm)
 {
 	uint16_t i;
-	struct vcpu *vcpu = NULL;
+	struct acrn_vcpu *vcpu = NULL;
 
 	foreach_vcpu(i, vm, vcpu) {
 		resume_vcpu(vcpu);
@@ -323,7 +323,7 @@ void resume_vm(struct vm *vm)
 */
 void resume_vm_from_s3(struct vm *vm, uint32_t wakeup_vec)
 {
-	struct vcpu *bsp = vcpu_from_vid(vm, 0U);
+	struct acrn_vcpu *bsp = vcpu_from_vid(vm, 0U);
 
 	vm->state = VM_STARTED;
 
@@ -12,7 +12,7 @@
 * This function should always return 0 since we shouldn't
 * deal with hypercall error in hypervisor.
 */
-int vmcall_vmexit_handler(struct vcpu *vcpu)
+int vmcall_vmexit_handler(struct acrn_vcpu *vcpu)
 {
 	int32_t ret = -EACCES;
 	struct vm *vm = vcpu->vm;
@@ -132,7 +132,7 @@ static void intercept_x2apic_msrs(uint8_t *msr_bitmap_arg, enum rw_mode mode)
 	}
 }
 
-void init_msr_emulation(struct vcpu *vcpu)
+void init_msr_emulation(struct acrn_vcpu *vcpu)
 {
 	uint32_t i;
 	uint32_t msrs_count = ARRAY_SIZE(emulated_msrs);
@@ -184,7 +184,7 @@ void init_msr_emulation(struct vcpu *vcpu)
 	pr_dbg("VMX_MSR_BITMAP: 0x%016llx ", value64);
 }
 
-int rdmsr_vmexit_handler(struct vcpu *vcpu)
+int rdmsr_vmexit_handler(struct acrn_vcpu *vcpu)
 {
 	int err = 0;
 	uint32_t msr;
@@ -297,7 +297,7 @@ int rdmsr_vmexit_handler(struct vcpu *vcpu)
 	return err;
 }
 
-int wrmsr_vmexit_handler(struct vcpu *vcpu)
+int wrmsr_vmexit_handler(struct acrn_vcpu *vcpu)
 {
 	int err = 0;
 	uint32_t msr;
@@ -428,7 +428,7 @@ int wrmsr_vmexit_handler(struct vcpu *vcpu)
 	return err;
 }
 
-void update_msr_bitmap_x2apic_apicv(struct vcpu *vcpu)
+void update_msr_bitmap_x2apic_apicv(struct acrn_vcpu *vcpu)
 {
 	uint8_t *msr_bitmap;
 
@@ -447,7 +447,7 @@ void update_msr_bitmap_x2apic_apicv(struct vcpu *vcpu)
 	enable_msr_interception(msr_bitmap, MSR_IA32_EXT_APIC_SELF_IPI, READ);
 }
 
-void update_msr_bitmap_x2apic_passthru(struct vcpu *vcpu)
+void update_msr_bitmap_x2apic_passthru(struct acrn_vcpu *vcpu)
 {
 	uint32_t msr;
 	uint8_t *msr_bitmap;
@@ -24,7 +24,7 @@ static void complete_ioreq(struct vhm_request *vhm_req)
 * request having transferred to the COMPLETE state.
 */
 static void
-emulate_pio_post(struct vcpu *vcpu, const struct io_request *io_req)
+emulate_pio_post(struct acrn_vcpu *vcpu, const struct io_request *io_req)
 {
 	const struct pio_request *pio_req = &io_req->reqs.pio;
 	uint64_t mask = 0xFFFFFFFFUL >> (32UL - 8UL * pio_req->size);
@@ -46,7 +46,7 @@ emulate_pio_post(struct vcpu *vcpu, const struct io_request *io_req)
 * @remark This function must be called after the VHM request corresponding to
 * \p vcpu being transferred to the COMPLETE state.
 */
-void dm_emulate_pio_post(struct vcpu *vcpu)
+void dm_emulate_pio_post(struct acrn_vcpu *vcpu)
 {
 	uint16_t cur = vcpu->vcpu_id;
 	union vhm_request_buffer *req_buf = NULL;
@@ -77,7 +77,7 @@ void dm_emulate_pio_post(struct vcpu *vcpu)
 * either a previous call to emulate_io() returning 0 or the corresponding VHM
 * request transferring to the COMPLETE state.
 */
-void emulate_mmio_post(const struct vcpu *vcpu, const struct io_request *io_req)
+void emulate_mmio_post(const struct acrn_vcpu *vcpu, const struct io_request *io_req)
 {
 	const struct mmio_request *mmio_req = &io_req->reqs.mmio;
 
@@ -97,7 +97,7 @@ void emulate_mmio_post(const struct vcpu *vcpu, const struct io_request *io_req)
 * @remark This function must be called after the VHM request corresponding to
 * \p vcpu being transferred to the COMPLETE state.
 */
-void dm_emulate_mmio_post(struct vcpu *vcpu)
+void dm_emulate_mmio_post(struct acrn_vcpu *vcpu)
 {
 	uint16_t cur = vcpu->vcpu_id;
 	struct io_request *io_req = &vcpu->req;
@@ -132,7 +132,7 @@ static void io_instr_dest_handler(struct io_request *io_req)
 *
 * @param vcpu The virtual CPU that triggers the MMIO access
 */
-void emulate_io_post(struct vcpu *vcpu)
+void emulate_io_post(struct acrn_vcpu *vcpu)
 {
 	union vhm_request_buffer *req_buf;
 	struct vhm_request *vhm_req;
@@ -191,7 +191,7 @@ void emulate_io_post(struct vcpu *vcpu)
 * @return -EIO - The request spans multiple devices and cannot be emulated.
 */
 int32_t
-hv_emulate_pio(const struct vcpu *vcpu, struct io_request *io_req)
+hv_emulate_pio(const struct acrn_vcpu *vcpu, struct io_request *io_req)
 {
 	int32_t status = -ENODEV;
 	uint16_t port, size;
@@ -249,7 +249,7 @@ hv_emulate_pio(const struct vcpu *vcpu, struct io_request *io_req)
 * @return -EIO - The request spans multiple devices and cannot be emulated.
 */
 static int32_t
-hv_emulate_mmio(struct vcpu *vcpu, struct io_request *io_req)
+hv_emulate_mmio(struct acrn_vcpu *vcpu, struct io_request *io_req)
 {
 	int status = -ENODEV;
 	uint64_t address, size;
@@ -299,7 +299,7 @@ hv_emulate_mmio(struct vcpu *vcpu, struct io_request *io_req)
 * @return Negative on other errors during emulation.
 */
 int32_t
-emulate_io(struct vcpu *vcpu, struct io_request *io_req)
+emulate_io(struct acrn_vcpu *vcpu, struct io_request *io_req)
 {
 	int32_t status;
 
@@ -357,7 +357,7 @@ emulate_io(struct vcpu *vcpu, struct io_request *io_req)
 *
 * @param vcpu The virtual CPU which triggers the VM exit on I/O instruction
 */
-int32_t pio_instr_vmexit_handler(struct vcpu *vcpu)
+int32_t pio_instr_vmexit_handler(struct acrn_vcpu *vcpu)
 {
 	int32_t status;
 	uint64_t exit_qual;
@@ -384,7 +384,7 @@ void dispatch_exception(struct intr_excp_ctx *ctx)
 void partition_mode_dispatch_interrupt(struct intr_excp_ctx *ctx)
 {
 	uint8_t vr = ctx->vector;
-	struct vcpu *vcpu;
+	struct acrn_vcpu *vcpu;
 
 	/*
 	 * There is no vector and APIC ID remapping for VMs in
@@ -168,7 +168,7 @@ void flush_vpid_global(void)
 	local_invvpid(VMX_VPID_TYPE_ALL_CONTEXT, 0U, 0UL);
 }
 
-void invept(const struct vcpu *vcpu)
+void invept(const struct acrn_vcpu *vcpu)
 {
 	struct invept_desc desc = {0};
 
@@ -63,23 +63,23 @@
 		get_subrange_size_of_fixed_mtrr(index));
 }
 
-static inline bool is_mtrr_enabled(const struct vcpu *vcpu)
+static inline bool is_mtrr_enabled(const struct acrn_vcpu *vcpu)
 {
 	return (vcpu->mtrr.def_type.bits.enable != 0U);
 }
 
-static inline bool is_fixed_range_mtrr_enabled(const struct vcpu *vcpu)
+static inline bool is_fixed_range_mtrr_enabled(const struct acrn_vcpu *vcpu)
 {
 	return ((vcpu->mtrr.cap.bits.fix != 0U) &&
 		(vcpu->mtrr.def_type.bits.fixed_enable != 0U));
 }
 
-static inline uint8_t get_default_memory_type(const struct vcpu *vcpu)
+static inline uint8_t get_default_memory_type(const struct acrn_vcpu *vcpu)
 {
 	return (uint8_t)(vcpu->mtrr.def_type.bits.type);
 }
 
-void init_mtrr(struct vcpu *vcpu)
+void init_mtrr(struct acrn_vcpu *vcpu)
 {
 	union mtrr_cap_reg cap = {0};
 	uint32_t i;
@@ -154,7 +154,7 @@ static uint32_t update_ept(struct vm *vm, uint64_t start,
 	return attr;
 }
 
-static void update_ept_mem_type(const struct vcpu *vcpu)
+static void update_ept_mem_type(const struct acrn_vcpu *vcpu)
 {
 	uint8_t type;
 	uint64_t start, size;
@@ -193,7 +193,7 @@ static void update_ept_mem_type(const struct vcpu *vcpu)
 	}
 }
 
-void mtrr_wrmsr(struct vcpu *vcpu, uint32_t msr, uint64_t value)
+void mtrr_wrmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t value)
 {
 	uint32_t index;
 
@@ -236,7 +236,7 @@ void mtrr_wrmsr(struct vcpu *vcpu, uint32_t msr, uint64_t value)
 	}
 }
 
-uint64_t mtrr_rdmsr(const struct vcpu *vcpu, uint32_t msr)
+uint64_t mtrr_rdmsr(const struct acrn_vcpu *vcpu, uint32_t msr)
 {
 	const struct mtrr_state *mtrr = &vcpu->mtrr;
 	uint64_t ret = 0UL;
@@ -181,7 +181,7 @@ void destroy_secure_world(struct vm *vm, bool need_clr_mem)
 
 }
 
-static void save_world_ctx(struct vcpu *vcpu, struct ext_context *ext_ctx)
+static void save_world_ctx(struct acrn_vcpu *vcpu, struct ext_context *ext_ctx)
 {
 	/* cache on-demand run_context for efer/rflags/rsp/rip */
 	(void)vcpu_get_efer(vcpu);
@@ -235,7 +235,7 @@ static void save_world_ctx(struct vcpu *vcpu, struct ext_context *ext_ctx)
 		: : "r" (ext_ctx->fxstore_guest_area) : "memory");
 }
 
-static void load_world_ctx(struct vcpu *vcpu, const struct ext_context *ext_ctx)
+static void load_world_ctx(struct acrn_vcpu *vcpu, const struct ext_context *ext_ctx)
 {
 	/* mark to update on-demand run_context for efer/rflags/rsp */
 	bitmap_set_lock(CPU_REG_EFER, &vcpu->reg_updated);
@@ -291,7 +291,7 @@ static void copy_smc_param(const struct run_context *prev_ctx,
 	next_ctx->guest_cpu_regs.regs.rbx = prev_ctx->guest_cpu_regs.regs.rbx;
 }
 
-void switch_world(struct vcpu *vcpu, int next_world)
+void switch_world(struct acrn_vcpu *vcpu, int next_world)
 {
 	struct vcpu_arch *arch_vcpu = &vcpu->arch_vcpu;
 
@@ -327,7 +327,7 @@ void switch_world(struct vcpu *vcpu, int next_world)
 /* Put key_info and trusty_startup_param in the first Page of Trusty
 * runtime memory
 */
-static bool setup_trusty_info(struct vcpu *vcpu,
+static bool setup_trusty_info(struct acrn_vcpu *vcpu,
 	uint32_t mem_size, uint64_t mem_base_hpa)
 {
 	uint32_t i;
@@ -381,7 +381,7 @@ static bool setup_trusty_info(struct vcpu *vcpu,
 * RIP, RSP and RDI are specified below, other GP registers are leaved
 * as 0.
 */
-static bool init_secure_world_env(struct vcpu *vcpu,
+static bool init_secure_world_env(struct acrn_vcpu *vcpu,
 	uint64_t entry_gpa,
 	uint64_t base_hpa,
 	uint32_t size)
@@ -398,7 +398,7 @@ static bool init_secure_world_env(struct vcpu *vcpu,
 	return setup_trusty_info(vcpu, size, base_hpa);
 }
 
-bool initialize_trusty(struct vcpu *vcpu, uint64_t param)
+bool initialize_trusty(struct acrn_vcpu *vcpu, uint64_t param)
 {
 	uint64_t trusty_entry_gpa, trusty_base_gpa, trusty_base_hpa;
 	uint32_t trusty_mem_size;
@@ -477,7 +477,7 @@ void trusty_set_dseed(const void *dseed, uint8_t dseed_num)
 		dseed, sizeof(struct seed_info) * dseed_num);
 }
 
-void save_sworld_context(struct vcpu *vcpu)
+void save_sworld_context(struct acrn_vcpu *vcpu)
 {
 	(void)memcpy_s(&vcpu->vm->sworld_snapshot,
 		sizeof(struct cpu_context),
@@ -485,7 +485,7 @@ void save_sworld_context(struct vcpu *vcpu)
 		sizeof(struct cpu_context));
 }
 
-void restore_sworld_context(struct vcpu *vcpu)
+void restore_sworld_context(struct acrn_vcpu *vcpu)
 {
 	struct secure_world_control *sworld_ctl =
 		&vcpu->vm->sworld_control;
@@ -49,7 +49,7 @@ static const uint16_t exception_type[32] = {
|
||||
[31] = VMX_INT_TYPE_HW_EXP
|
||||
};
|
||||
|
||||
static bool is_guest_irq_enabled(struct vcpu *vcpu)
|
||||
static bool is_guest_irq_enabled(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
uint64_t guest_rflags, guest_state;
|
||||
bool status = false;
|
||||
@@ -70,7 +70,7 @@ static bool is_guest_irq_enabled(struct vcpu *vcpu)
|
||||
return status;
|
||||
}
|
||||
|
||||
static bool vcpu_pending_request(struct vcpu *vcpu)
|
||||
static bool vcpu_pending_request(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
struct acrn_vlapic *vlapic;
|
||||
uint32_t vector = 0U;
|
||||
@@ -91,7 +91,7 @@ static bool vcpu_pending_request(struct vcpu *vcpu)
|
||||
return vcpu->arch_vcpu.pending_req != 0UL;
|
||||
}
|
||||
|
||||
void vcpu_make_request(struct vcpu *vcpu, uint16_t eventid)
|
||||
void vcpu_make_request(struct acrn_vcpu *vcpu, uint16_t eventid)
|
||||
{
|
||||
bitmap_set_lock(eventid, &vcpu->arch_vcpu.pending_req);
|
||||
/*
|
||||
@@ -108,7 +108,7 @@ void vcpu_make_request(struct vcpu *vcpu, uint16_t eventid)
|
||||
}
|
||||
}
|
||||
|
||||
static int vcpu_inject_vlapic_int(struct vcpu *vcpu)
|
||||
static int vcpu_inject_vlapic_int(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
struct acrn_vlapic *vlapic = vcpu_vlapic(vcpu);
|
||||
uint32_t vector = 0U;
|
||||
@@ -148,10 +148,10 @@ static int vcpu_inject_vlapic_int(struct vcpu *vcpu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vcpu_do_pending_extint(const struct vcpu *vcpu)
|
||||
static int vcpu_do_pending_extint(const struct acrn_vcpu *vcpu)
|
||||
{
|
||||
struct vm *vm;
|
||||
struct vcpu *primary;
|
||||
struct acrn_vcpu *primary;
|
||||
uint32_t vector;
|
||||
|
||||
vm = vcpu->vm;
|
||||
@@ -205,7 +205,7 @@ static int get_excep_class(uint32_t vector)
|
||||
}
|
||||
}
|
||||
|
||||
int vcpu_queue_exception(struct vcpu *vcpu, uint32_t vector, uint32_t err_code)
|
||||
int vcpu_queue_exception(struct acrn_vcpu *vcpu, uint32_t vector, uint32_t err_code)
|
||||
{
|
||||
struct vcpu_arch *arch_vcpu = &vcpu->arch_vcpu;
|
||||
/* VECTOR_INVALID is also greater than 32 */
|
||||
@@ -250,7 +250,7 @@ int vcpu_queue_exception(struct vcpu *vcpu, uint32_t vector, uint32_t err_code)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vcpu_inject_exception(struct vcpu *vcpu, uint32_t vector)
|
||||
static void vcpu_inject_exception(struct acrn_vcpu *vcpu, uint32_t vector)
|
||||
{
|
||||
if ((exception_type[vector] & EXCEPTION_ERROR_CODE_VALID) != 0U) {
|
||||
exec_vmwrite32(VMX_ENTRY_EXCEPTION_ERROR_CODE,
|
||||
@@ -266,7 +266,7 @@ static void vcpu_inject_exception(struct vcpu *vcpu, uint32_t vector)
|
||||
vcpu_retain_rip(vcpu);
|
||||
}
|
||||
|
||||
static int vcpu_inject_hi_exception(struct vcpu *vcpu)
|
||||
static int vcpu_inject_hi_exception(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
uint32_t vector = vcpu->arch_vcpu.exception_info.exception;
|
||||
|
||||
@@ -278,7 +278,7 @@ static int vcpu_inject_hi_exception(struct vcpu *vcpu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vcpu_inject_lo_exception(struct vcpu *vcpu)
|
||||
static int vcpu_inject_lo_exception(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
uint32_t vector = vcpu->arch_vcpu.exception_info.exception;
|
||||
|
||||
@@ -292,26 +292,26 @@ static int vcpu_inject_lo_exception(struct vcpu *vcpu)
|
||||
}
|
||||
|
||||
/* Inject external interrupt to guest */
|
||||
void vcpu_inject_extint(struct vcpu *vcpu)
|
||||
void vcpu_inject_extint(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
vcpu_make_request(vcpu, ACRN_REQUEST_EXTINT);
|
||||
}
|
||||
|
||||
/* Inject NMI to guest */
|
||||
void vcpu_inject_nmi(struct vcpu *vcpu)
|
||||
void vcpu_inject_nmi(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
vcpu_make_request(vcpu, ACRN_REQUEST_NMI);
|
||||
}
|
||||
|
||||
/* Inject general protection exception(#GP) to guest */
|
||||
void vcpu_inject_gp(struct vcpu *vcpu, uint32_t err_code)
|
||||
void vcpu_inject_gp(struct acrn_vcpu *vcpu, uint32_t err_code)
|
||||
{
|
||||
(void)vcpu_queue_exception(vcpu, IDT_GP, err_code);
|
||||
vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
|
||||
}
|
||||
|
||||
/* Inject page fault exception(#PF) to guest */
|
||||
void vcpu_inject_pf(struct vcpu *vcpu, uint64_t addr, uint32_t err_code)
|
||||
void vcpu_inject_pf(struct acrn_vcpu *vcpu, uint64_t addr, uint32_t err_code)
|
||||
{
|
||||
vcpu_set_cr2(vcpu, addr);
|
||||
(void)vcpu_queue_exception(vcpu, IDT_PF, err_code);
|
||||
@@ -319,27 +319,27 @@ void vcpu_inject_pf(struct vcpu *vcpu, uint64_t addr, uint32_t err_code)
|
||||
}
|
||||
|
||||
/* Inject invalid opcode exception(#UD) to guest */
|
||||
void vcpu_inject_ud(struct vcpu *vcpu)
|
||||
void vcpu_inject_ud(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
(void)vcpu_queue_exception(vcpu, IDT_UD, 0);
|
||||
vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
|
||||
}
|
||||
|
||||
/* Inject alignment check exception(#AC) to guest */
|
||||
void vcpu_inject_ac(struct vcpu *vcpu)
|
||||
void vcpu_inject_ac(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
(void)vcpu_queue_exception(vcpu, IDT_AC, 0);
|
||||
vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
|
||||
}
|
||||
|
||||
/* Inject stack fault exception(#SS) to guest */
|
||||
void vcpu_inject_ss(struct vcpu *vcpu)
|
||||
void vcpu_inject_ss(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
(void)vcpu_queue_exception(vcpu, IDT_SS, 0);
|
||||
vcpu_make_request(vcpu, ACRN_REQUEST_EXCP);
|
||||
}
|
||||
|
||||
int interrupt_window_vmexit_handler(struct vcpu *vcpu)
|
||||
int interrupt_window_vmexit_handler(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
uint32_t value32;
|
||||
|
||||
@@ -357,7 +357,7 @@ int interrupt_window_vmexit_handler(struct vcpu *vcpu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int external_interrupt_vmexit_handler(struct vcpu *vcpu)
|
||||
int external_interrupt_vmexit_handler(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
uint32_t intr_info;
|
||||
struct intr_excp_ctx ctx;
|
||||
@@ -389,7 +389,7 @@ int external_interrupt_vmexit_handler(struct vcpu *vcpu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int acrn_handle_pending_request(struct vcpu *vcpu)
|
||||
int acrn_handle_pending_request(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
int ret = 0;
|
||||
uint32_t tmp;
|
||||
@@ -544,7 +544,7 @@ INTR_WIN:
|
||||
return ret;
|
||||
}

void cancel_event_injection(struct vcpu *vcpu)
void cancel_event_injection(struct acrn_vcpu *vcpu)
{
uint32_t intinfo;

@@ -572,7 +572,7 @@ void cancel_event_injection(struct vcpu *vcpu)
/*
* @pre vcpu != NULL
*/
int exception_vmexit_handler(struct vcpu *vcpu)
int exception_vmexit_handler(struct acrn_vcpu *vcpu)
{
uint32_t intinfo, int_err_code = 0U;
uint32_t exception_vector = VECTOR_INVALID;

@@ -12,8 +12,8 @@
*/
#define NR_VMX_EXIT_REASONS 65U

static int unhandled_vmexit_handler(struct vcpu *vcpu);
static int xsetbv_vmexit_handler(struct vcpu *vcpu);
static int unhandled_vmexit_handler(struct acrn_vcpu *vcpu);
static int xsetbv_vmexit_handler(struct acrn_vcpu *vcpu);

/* VM Dispatch table for Exit condition handling */
static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
@@ -151,7 +151,7 @@ static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
.handler = unhandled_vmexit_handler}
};
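
The dispatch table turns VM-exit handling into a single indexed lookup: the low 16 bits of the VMCS exit reason (the "basic exit reason") select the row. The handler signatures are visible in this diff; the struct layout and lookup below are an illustrative reconstruction, not a copy of the real table:

/* Sketch: exit-reason dispatch. Indices beyond the table are rejected so
 * a malformed or future exit reason cannot index out of bounds. */
struct demo_exit_dispatch {
    int (*handler)(struct acrn_vcpu *);
    uint32_t need_exit_qualification; /* read VMX_EXIT_QUALIFICATION first? */
};

static int demo_dispatch_exit(struct acrn_vcpu *vcpu, uint64_t exit_reason,
                              const struct demo_exit_dispatch *table,
                              uint16_t nr_entries)
{
    uint16_t basic = (uint16_t)(exit_reason & 0xFFFFU);

    if (basic >= nr_entries) {
        return -1; /* unknown exit reason */
    }
    return table[basic].handler(vcpu);
}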

int vmexit_handler(struct vcpu *vcpu)
int vmexit_handler(struct acrn_vcpu *vcpu)
{
struct vm_exit_dispatch *dispatch = NULL;
uint16_t basic_exit_reason;
@@ -227,7 +227,7 @@ int vmexit_handler(struct vcpu *vcpu)
return ret;
}

static int unhandled_vmexit_handler(struct vcpu *vcpu)
static int unhandled_vmexit_handler(struct acrn_vcpu *vcpu)
{
pr_fatal("Error: Unhandled VM exit condition from guest at 0x%016llx ",
exec_vmread(VMX_GUEST_RIP));
@@ -242,7 +242,7 @@ static int unhandled_vmexit_handler(struct vcpu *vcpu)
return 0;
}

int cpuid_vmexit_handler(struct vcpu *vcpu)
int cpuid_vmexit_handler(struct acrn_vcpu *vcpu)
{
uint64_t rax, rbx, rcx, rdx;

@@ -262,7 +262,7 @@ int cpuid_vmexit_handler(struct vcpu *vcpu)
return 0;
}

int cr_access_vmexit_handler(struct vcpu *vcpu)
int cr_access_vmexit_handler(struct acrn_vcpu *vcpu)
{
uint64_t reg;
uint32_t idx;
@@ -317,7 +317,7 @@ int cr_access_vmexit_handler(struct vcpu *vcpu)
* XSETBV instruction sets the XCR0 register, which selects the state
* components that can be saved on a context switch using xsave.
*/
static int xsetbv_vmexit_handler(struct vcpu *vcpu)
static int xsetbv_vmexit_handler(struct acrn_vcpu *vcpu)
{
int idx;
uint64_t val64;
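
Before committing a guest XCR0 value, an XSETBV emulator has a fixed set of architectural checks to make (these come from the Intel SDM, not from this diff): only XCR0 itself is defined, its x87 bit is hard-wired to 1, and AVX state cannot be enabled without SSE state. A sketch with the constants spelled out:

/* Sketch: validate a guest-supplied XCR0 before xsetbv. */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_XCR0_X87 (1UL << 0U)
#define DEMO_XCR0_SSE (1UL << 1U)
#define DEMO_XCR0_AVX (1UL << 2U)

static bool demo_xcr0_is_valid(uint32_t xcr_index, uint64_t val)
{
    if (xcr_index != 0U) {
        return false; /* only XCR0 is architecturally defined */
    }
    if ((val & DEMO_XCR0_X87) == 0UL) {
        return false; /* bit 0 must remain set */
    }
    if (((val & DEMO_XCR0_AVX) != 0UL) && ((val & DEMO_XCR0_SSE) == 0UL)) {
        return false; /* AVX state requires SSE state */
    }
    return true; /* failing any check means injecting #GP instead */
}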

@@ -26,8 +26,8 @@ static uint64_t cr4_host_mask;
static uint64_t cr4_always_on_mask;
static uint64_t cr4_always_off_mask;

void update_msr_bitmap_x2apic_apicv(struct vcpu *vcpu);
void update_msr_bitmap_x2apic_passthru(struct vcpu *vcpu);
void update_msr_bitmap_x2apic_apicv(struct acrn_vcpu *vcpu);
void update_msr_bitmap_x2apic_passthru(struct acrn_vcpu *vcpu);

bool is_vmx_disabled(void)
{
@@ -86,7 +86,7 @@ void exec_vmxon_instr(uint16_t pcpu_id)
uint32_t tmp32;
void *vmxon_region_va = (void *)per_cpu(vmxon_region, pcpu_id);
uint64_t vmxon_region_pa;
struct vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
struct acrn_vcpu *vcpu = get_ever_run_vcpu(pcpu_id);

/* Initialize vmxon page with revision id from IA32 VMX BASIC MSR */
tmp32 = (uint32_t)msr_read(MSR_IA32_VMX_BASIC);
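
The tmp32 assignment above is the standard VMXON prologue: the low 32 bits of IA32_VMX_BASIC carry the VMCS revision identifier, and that identifier must be written to the first dword of the VMXON region before VMXON executes. A self-contained sketch, assuming a 4 KiB region:

/* Sketch: stamp a VMXON region with the revision id from IA32_VMX_BASIC. */
#include <stdint.h>
#include <string.h>

static void demo_prepare_vmxon_region(void *region_va, uint64_t vmx_basic)
{
    uint32_t rev_id = (uint32_t)vmx_basic; /* bits 30:0 = revision id */

    (void)memset(region_va, 0, 4096);
    (void)memcpy(region_va, &rev_id, sizeof(rev_id));
}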

@@ -109,7 +109,7 @@ void exec_vmxon_instr(uint16_t pcpu_id)
void vmx_off(uint16_t pcpu_id)
{

struct vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
struct acrn_vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
uint64_t vmcs_pa;

vmcs_pa = hva2hpa(vcpu->arch_vcpu.vmcs);
@@ -253,7 +253,7 @@ static void init_cr0_cr4_host_mask(void)
pr_dbg("CR4 mask value: 0x%016llx", cr4_host_mask);
}

uint64_t vmx_rdmsr_pat(const struct vcpu *vcpu)
uint64_t vmx_rdmsr_pat(const struct acrn_vcpu *vcpu)
{
/*
* note: if context->cr0.CD is set, the actual value in guest's
@@ -263,7 +263,7 @@ uint64_t vmx_rdmsr_pat(const struct vcpu *vcpu)
return vcpu_get_pat_ext(vcpu);
}

int vmx_wrmsr_pat(struct vcpu *vcpu, uint64_t value)
int vmx_wrmsr_pat(struct acrn_vcpu *vcpu, uint64_t value)
{
uint32_t i;
uint64_t field;
@@ -291,7 +291,7 @@ int vmx_wrmsr_pat(struct vcpu *vcpu, uint64_t value)
return 0;
}
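
Conceptually, the loop in vmx_wrmsr_pat() walks the eight 8-bit fields of IA32_PAT and rejects any field that is not an architecturally valid memory type (UC=0, WC=1, WT=4, WP=5, WB=6, UC-=7; 2 and 3 are reserved). A sketch of that validation; a caller would inject #GP on failure:

/* Sketch: IA32_PAT holds eight memory-type fields, one per byte. */
#include <stdbool.h>
#include <stdint.h>

static bool demo_pat_value_is_valid(uint64_t value)
{
    uint32_t i;

    for (i = 0U; i < 8U; i++) {
        uint64_t field = (value >> (i * 8U)) & 0xFFUL;

        if ((field == 2UL) || (field == 3UL) || (field > 7UL)) {
            return false; /* reserved encoding */
        }
    }
    return true;
}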

static void load_pdptrs(const struct vcpu *vcpu)
static void load_pdptrs(const struct acrn_vcpu *vcpu)
{
uint64_t guest_cr3 = exec_vmread(VMX_GUEST_CR3);
/* TODO: check whether guest cr3 is valid */
@@ -303,7 +303,7 @@ static void load_pdptrs(const struct vcpu *vcpu)
exec_vmwrite64(VMX_GUEST_PDPTE3_FULL, get_pgentry(guest_cr3_hva + 3UL));
}
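
Architecturally, load_pdptrs() mirrors PAE paging rules: when the guest runs with PAE, CR3 points at four 8-byte page-directory-pointer-table entries, and VMX requires the hypervisor to load validated copies into the VMCS PDPTE fields. A sketch, under the assumption that the sibling PDPTE0..PDPTE2 field macros and a gpa2hva()-style translation exist alongside what the diff shows:

/* Sketch: copy the four PAE PDPTEs from guest memory into the VMCS.
 * CR3 bits 4:0 are ignored in PAE mode, hence the 32-byte alignment mask. */
static void demo_load_pdptes(struct acrn_vcpu *vcpu, uint64_t guest_cr3)
{
    uint64_t *pdpt = (uint64_t *)gpa2hva(vcpu->vm, guest_cr3 & ~0x1FUL);

    exec_vmwrite64(VMX_GUEST_PDPTE0_FULL, pdpt[0]);
    exec_vmwrite64(VMX_GUEST_PDPTE1_FULL, pdpt[1]);
    exec_vmwrite64(VMX_GUEST_PDPTE2_FULL, pdpt[2]);
    exec_vmwrite64(VMX_GUEST_PDPTE3_FULL, pdpt[3]);
}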

static bool is_cr0_write_valid(struct vcpu *vcpu, uint64_t cr0)
static bool is_cr0_write_valid(struct acrn_vcpu *vcpu, uint64_t cr0)
{
/* Shouldn't set always off bit */
if ((cr0 & cr0_always_off_mask) != 0UL) {
@@ -359,7 +359,7 @@ static bool is_cr0_write_valid(struct vcpu *vcpu, uint64_t cr0)
* - PG (31) Trapped to track cpu/paging mode.
* Set the value according to the value from guest.
*/
void vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t cr0)
{
uint64_t cr0_vmx;
uint32_t entry_ctrls;
@@ -450,7 +450,7 @@ void vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
cr0, cr0_vmx);
}
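
The comment block above vmx_write_cr0() lists which CR0 bits the hypervisor owns. Mechanically, ownership works through the CR0 guest/host mask: masked bits trap to cr_access_vmexit_handler(), and guest reads of them come from the read shadow, so the handler must keep the shadow equal to what the guest believes it wrote while the real CR0 carries the always-on bits. A sketch of that commit step; the field macro names here are assumptions modeled on this file's style:

/* Sketch: commit a trapped CR0 write under mask/shadow semantics. */
static void demo_commit_cr0(uint64_t guest_cr0, uint64_t always_on,
                            uint64_t host_owned_mask)
{
    uint64_t cr0_vmx = guest_cr0 | always_on; /* value the CPU really uses */

    exec_vmwrite(VMX_GUEST_CR0, cr0_vmx);         /* hardware CR0 */
    exec_vmwrite(VMX_CR0_READ_SHADOW, guest_cr0); /* what the guest reads */
    exec_vmwrite(VMX_CR0_MASK, host_owned_mask);  /* which bits trap */
}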

static bool is_cr4_write_valid(struct vcpu *vcpu, uint64_t cr4)
static bool is_cr4_write_valid(struct acrn_vcpu *vcpu, uint64_t cr4)
{
/* Check if guest tries to set fixed-to-0 bits or reserved bits */
if ((cr4 & cr4_always_off_mask) != 0U) {
@@ -511,7 +511,7 @@ static bool is_cr4_write_valid(struct vcpu *vcpu, uint64_t cr4)
* - SMAP (21) Flexible to guest
* - PKE (22) Flexible to guest
*/
void vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4)
void vmx_write_cr4(struct acrn_vcpu *vcpu, uint64_t cr4)
{
uint64_t cr4_vmx;
uint64_t old_cr4 = vcpu_get_cr4(vcpu);
@@ -545,7 +545,7 @@ void vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4)
}

/* rip, rsp, ia32_efer and rflags are written to VMCS in start_vcpu */
static void init_guest_vmx(struct vcpu *vcpu, uint64_t cr0, uint64_t cr3,
static void init_guest_vmx(struct acrn_vcpu *vcpu, uint64_t cr0, uint64_t cr3,
uint64_t cr4)
{
struct cpu_context *ctx =
@@ -590,7 +590,7 @@ static void init_guest_vmx(struct vcpu *vcpu, uint64_t cr0, uint64_t cr3,
exec_vmwrite(VMX_GUEST_DR7, DR7_INIT_VALUE);
}

static void init_guest_state(struct vcpu *vcpu)
static void init_guest_state(struct acrn_vcpu *vcpu)
{
struct cpu_context *ctx =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
@@ -758,7 +758,7 @@ static uint32_t check_vmx_ctrl(uint32_t msr, uint32_t ctrl_req)

}
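
check_vmx_ctrl() embodies the SDM's capability-MSR rule for every VMX control field: in each IA32_VMX_*_CTLS MSR, the low 32 bits are the allowed-0 settings (bits that must be 1) and the high 32 bits are the allowed-1 settings (bits that may be 1). The requested value is clamped between the two:

/* Sketch: adjust a requested VMX control by its capability MSR. */
#include <stdint.h>

static uint32_t demo_adjust_vmx_ctrl(uint64_t cap_msr_val, uint32_t requested)
{
    uint32_t allowed_0 = (uint32_t)cap_msr_val;          /* must-be-1 bits */
    uint32_t allowed_1 = (uint32_t)(cap_msr_val >> 32U); /* may-be-1 bits */

    return (requested | allowed_0) & allowed_1;
}

If the adjusted result differs from the request, the platform cannot honor that setting, which is why implementations typically log the discrepancy as well.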

static void init_exec_ctrl(struct vcpu *vcpu)
static void init_exec_ctrl(struct acrn_vcpu *vcpu)
{
uint32_t value32;
uint64_t value64;
@@ -948,7 +948,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
exec_vmwrite(VMX_CR3_TARGET_3, 0UL);
}

static void init_entry_ctrl(__unused const struct vcpu *vcpu)
static void init_entry_ctrl(__unused const struct acrn_vcpu *vcpu)
{
uint32_t value32;

@@ -1028,7 +1028,7 @@ static void init_exit_ctrl(void)
/**
* @pre vcpu != NULL
*/
void init_vmcs(struct vcpu *vcpu)
void init_vmcs(struct acrn_vcpu *vcpu)
{
uint64_t vmx_rev_id;
uint64_t vmcs_pa;
@@ -1057,7 +1057,7 @@ void init_vmcs(struct vcpu *vcpu)
}
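
init_vmcs() follows the canonical VMCS bring-up order: stamp the region with the revision id, VMCLEAR it into the clear state, VMPTRLD it to make it current on this logical processor, and only then program the guest/host/control fields. A sketch of that order, assuming exec_vmclear()/exec_vmptrld() are thin wrappers over the VMCLEAR/VMPTRLD instructions that take a pointer to the region's physical address:

/* Sketch: the required ordering for activating a VMCS. */
static void demo_bring_up_vmcs(struct acrn_vcpu *vcpu, uint64_t vmcs_pa,
                               uint32_t rev_id)
{
    (void)memcpy(vcpu->arch_vcpu.vmcs, &rev_id, sizeof(rev_id));
    exec_vmclear((void *)&vmcs_pa); /* assumed wrapper over VMCLEAR */
    exec_vmptrld((void *)&vmcs_pa); /* assumed wrapper over VMPTRLD */
    /* ...init_guest_state()/init_exec_ctrl()/init_entry_ctrl()/
     * init_exit_ctrl() then fill in the fields. */
}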

#ifndef CONFIG_PARTITION_MODE
void switch_apicv_mode_x2apic(struct vcpu *vcpu)
void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu)
{
uint32_t value32;
value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS2);
@@ -1067,7 +1067,7 @@ void switch_apicv_mode_x2apic(struct vcpu *vcpu)
update_msr_bitmap_x2apic_apicv(vcpu);
}
#else
void switch_apicv_mode_x2apic(struct vcpu *vcpu)
void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu)
{
uint32_t value32;
if(vcpu->vm->vm_desc->lapic_pt) {