Mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2025-07-29 14:37:36 +00:00)
HV: treewide: rename vcpu_arch data structure

The name of the data structure type "struct vcpu_arch" shall follow the naming convention.

Naming convention rule: if a data structure type is used by multiple modules, its corresponding logical resource is exposed to external components (such as SOS or UOS), and its name is simple and generic (such as vcpu or vm), the type name needs the prefix "acrn_". A variable name can be shortened from its data structure type name.

The following updates are made:
struct vcpu_arch arch_vcpu --> struct acrn_vcpu_arch arch

Tracked-On: #861
Signed-off-by: Xiangyang Wu <xiangyang.wu@linux.intel.com>
This commit is contained in:
parent fa26a16645
commit ace4f48c9a
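In short, the rename amounts to the change below in the vcpu header (a condensed sketch drawn from the last hunks of the diff; unrelated fields are elided with /* ... */):

/* Before: the type name has no "acrn_" prefix and the member name repeats "vcpu" */
struct vcpu_arch {
	uint8_t vmcs[CPU_PAGE_SIZE];	/* vmcs region for this vcpu, MUST be 4KB-aligned */
	/* ... */
};

struct acrn_vcpu {
	struct vcpu_arch arch_vcpu;	/* Architecture specific definitions for this VCPU */
	/* ... */
};

/* After: the multi-module, externally exposed type gets the "acrn_" prefix,
 * and the member name is shortened from the type name to just "arch", so
 * accesses become vcpu->arch.<field> instead of vcpu->arch_vcpu.<field>.
 */
struct acrn_vcpu_arch {
	uint8_t vmcs[CPU_PAGE_SIZE];
	/* ... */
};

struct acrn_vcpu {
	struct acrn_vcpu_arch arch;
	/* ... */
};

The diff below applies that rename treewide.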
@@ -30,7 +30,7 @@ uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
 	void *eptp;
 	struct acrn_vcpu *vcpu = vcpu_from_pid(vm, get_cpu_id());
 
-	if ((vcpu != NULL) && (vcpu->arch_vcpu.cur_context == SECURE_WORLD)) {
+	if ((vcpu != NULL) && (vcpu->arch.cur_context == SECURE_WORLD)) {
 		eptp = vm->arch_vm.sworld_eptp;
 	} else {
 		eptp = vm->arch_vm.nworld_eptp;
@@ -80,7 +80,7 @@ int ept_violation_vmexit_handler(struct acrn_vcpu *vcpu)
 	struct mmio_request *mmio_req = &io_req->reqs.mmio;
 
 	/* Handle page fault from guest */
-	exit_qual = vcpu->arch_vcpu.exit_qualification;
+	exit_qual = vcpu->arch.exit_qualification;
 
 	io_req->type = REQ_MMIO;
@@ -1631,7 +1631,7 @@ static int vmm_emulate_instruction(struct instr_emul_ctxt *ctxt)
 static int vie_init(struct instr_emul_vie *vie, struct acrn_vcpu *vcpu)
 {
 	uint64_t guest_rip_gva = vcpu_get_rip(vcpu);
-	uint32_t inst_len = vcpu->arch_vcpu.inst_len;
+	uint32_t inst_len = vcpu->arch.inst_len;
 	uint32_t err_code;
 	uint64_t fault_addr;
 	int ret;
@@ -13,7 +13,7 @@ vm_sw_loader_t vm_sw_loader;
 inline uint64_t vcpu_get_gpreg(const struct acrn_vcpu *vcpu, uint32_t reg)
 {
 	const struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
 	return ctx->guest_cpu_regs.longs[reg];
 }
@@ -21,7 +21,7 @@ inline uint64_t vcpu_get_gpreg(const struct acrn_vcpu *vcpu, uint32_t reg)
 inline void vcpu_set_gpreg(struct acrn_vcpu *vcpu, uint32_t reg, uint64_t val)
 {
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
 	ctx->guest_cpu_regs.longs[reg] = val;
 }
@@ -29,7 +29,7 @@ inline void vcpu_set_gpreg(struct acrn_vcpu *vcpu, uint32_t reg, uint64_t val)
 inline uint64_t vcpu_get_rip(struct acrn_vcpu *vcpu)
 {
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
 	if (bitmap_test(CPU_REG_RIP, &vcpu->reg_updated) == 0 &&
 		bitmap_test_and_set_lock(CPU_REG_RIP, &vcpu->reg_cached) == 0)
@@ -39,14 +39,14 @@ inline uint64_t vcpu_get_rip(struct acrn_vcpu *vcpu)
 
 inline void vcpu_set_rip(struct acrn_vcpu *vcpu, uint64_t val)
 {
-	vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.rip = val;
+	vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.rip = val;
 	bitmap_set_lock(CPU_REG_RIP, &vcpu->reg_updated);
 }
 
 inline uint64_t vcpu_get_rsp(struct acrn_vcpu *vcpu)
 {
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
 	return ctx->guest_cpu_regs.regs.rsp;
 }
@@ -54,7 +54,7 @@ inline uint64_t vcpu_get_rsp(struct acrn_vcpu *vcpu)
 inline void vcpu_set_rsp(struct acrn_vcpu *vcpu, uint64_t val)
 {
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
 	ctx->guest_cpu_regs.regs.rsp = val;
 	bitmap_set_lock(CPU_REG_RSP, &vcpu->reg_updated);
@@ -63,7 +63,7 @@ inline void vcpu_set_rsp(struct acrn_vcpu *vcpu, uint64_t val)
 inline uint64_t vcpu_get_efer(struct acrn_vcpu *vcpu)
 {
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
 	if (bitmap_test(CPU_REG_EFER, &vcpu->reg_updated) == 0 &&
 		bitmap_test_and_set_lock(CPU_REG_EFER, &vcpu->reg_cached) == 0)
@@ -73,7 +73,7 @@ inline uint64_t vcpu_get_efer(struct acrn_vcpu *vcpu)
 
 inline void vcpu_set_efer(struct acrn_vcpu *vcpu, uint64_t val)
 {
-	vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.ia32_efer
+	vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.ia32_efer
 		= val;
 	bitmap_set_lock(CPU_REG_EFER, &vcpu->reg_updated);
 }
@@ -81,7 +81,7 @@ inline void vcpu_set_efer(struct acrn_vcpu *vcpu, uint64_t val)
 inline uint64_t vcpu_get_rflags(struct acrn_vcpu *vcpu)
 {
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
 	if (bitmap_test(CPU_REG_RFLAGS, &vcpu->reg_updated) == 0 &&
 		bitmap_test_and_set_lock(CPU_REG_RFLAGS,
@@ -92,7 +92,7 @@ inline uint64_t vcpu_get_rflags(struct acrn_vcpu *vcpu)
 
 inline void vcpu_set_rflags(struct acrn_vcpu *vcpu, uint64_t val)
 {
-	vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.rflags =
+	vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.rflags =
 		val;
 	bitmap_set_lock(CPU_REG_RFLAGS, &vcpu->reg_updated);
 }
@@ -101,7 +101,7 @@ inline uint64_t vcpu_get_cr0(struct acrn_vcpu *vcpu)
 {
 	uint64_t mask;
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
 	if (bitmap_test_and_set_lock(CPU_REG_CR0, &vcpu->reg_cached) == 0) {
 		mask = exec_vmread(VMX_CR0_MASK);
@@ -119,19 +119,19 @@ inline void vcpu_set_cr0(struct acrn_vcpu *vcpu, uint64_t val)
 inline uint64_t vcpu_get_cr2(struct acrn_vcpu *vcpu)
 {
 	return vcpu->
-		arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.cr2;
+		arch.contexts[vcpu->arch.cur_context].run_ctx.cr2;
 }
 
 inline void vcpu_set_cr2(struct acrn_vcpu *vcpu, uint64_t val)
 {
-	vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx.cr2 = val;
+	vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.cr2 = val;
 }
 
 inline uint64_t vcpu_get_cr4(struct acrn_vcpu *vcpu)
 {
 	uint64_t mask;
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
 	if (bitmap_test_and_set_lock(CPU_REG_CR4, &vcpu->reg_cached) == 0) {
 		mask = exec_vmread(VMX_CR4_MASK);
@@ -148,13 +148,13 @@ inline void vcpu_set_cr4(struct acrn_vcpu *vcpu, uint64_t val)
 
 inline uint64_t vcpu_get_pat_ext(const struct acrn_vcpu *vcpu)
 {
-	return vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].
+	return vcpu->arch.contexts[vcpu->arch.cur_context].
 		ext_ctx.ia32_pat;
 }
 
 inline void vcpu_set_pat_ext(struct acrn_vcpu *vcpu, uint64_t val)
 {
-	vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ext_ctx.ia32_pat
+	vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx.ia32_pat
 		= val;
 }
 
@@ -168,13 +168,13 @@ static void set_vcpu_mode(struct acrn_vcpu *vcpu, uint32_t cs_attr, uint64_t ia3
 {
 	if (ia32_efer & MSR_IA32_EFER_LMA_BIT) {
 		if (cs_attr & 0x2000) /* CS.L = 1 */
-			vcpu->arch_vcpu.cpu_mode = CPU_MODE_64BIT;
+			vcpu->arch.cpu_mode = CPU_MODE_64BIT;
 		else
-			vcpu->arch_vcpu.cpu_mode = CPU_MODE_COMPATIBILITY;
+			vcpu->arch.cpu_mode = CPU_MODE_COMPATIBILITY;
 	} else if (cr0 & CR0_PE) {
-		vcpu->arch_vcpu.cpu_mode = CPU_MODE_PROTECTED;
+		vcpu->arch.cpu_mode = CPU_MODE_PROTECTED;
 	} else {
-		vcpu->arch_vcpu.cpu_mode = CPU_MODE_REAL;
+		vcpu->arch.cpu_mode = CPU_MODE_REAL;
 	}
 }
 
@@ -186,8 +186,8 @@ void set_vcpu_regs(struct acrn_vcpu *vcpu, struct acrn_vcpu_regs *vcpu_regs)
 	struct segment_sel *seg;
 	uint32_t limit, attr;
 
-	ectx = &(vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ext_ctx);
-	ctx = &(vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx);
+	ectx = &(vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx);
+	ctx = &(vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx);
 
 	/* NOTE:
 	 * This is to set the attr and limit to default value.
@@ -289,7 +289,7 @@ void set_ap_entry(struct acrn_vcpu *vcpu, uint64_t entry)
 {
 	struct ext_context *ectx;
 
-	ectx = &(vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ext_ctx);
+	ectx = &(vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx);
 	ectx->cs.selector = (uint16_t)((entry >> 4U) & 0xFFFFU);
 	ectx->cs.base = ectx->cs.selector << 4U;
 
@@ -353,13 +353,13 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct acrn_vcpu **rtn_vcpu_han
 		vcpu->pcpu_id, vcpu->vm->vm_id, vcpu->vcpu_id,
 		is_vcpu_bsp(vcpu) ? "PRIMARY" : "SECONDARY");
 
-	vcpu->arch_vcpu.vpid = allocate_vpid();
+	vcpu->arch.vpid = allocate_vpid();
 
 	/* Initialize exception field in VCPU context */
-	vcpu->arch_vcpu.exception_info.exception = VECTOR_INVALID;
+	vcpu->arch.exception_info.exception = VECTOR_INVALID;
 
 	/* Initialize cur context */
-	vcpu->arch_vcpu.cur_context = NORMAL_WORLD;
+	vcpu->arch.cur_context = NORMAL_WORLD;
 
 	/* Create per vcpu vlapic */
 	vlapic_create(vcpu);
@@ -374,7 +374,7 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct acrn_vcpu **rtn_vcpu_han
 	vcpu->launched = false;
 	vcpu->paused_cnt = 0U;
 	vcpu->running = 0;
-	vcpu->arch_vcpu.nr_sipi = 0;
+	vcpu->arch.nr_sipi = 0;
 	vcpu->pending_pre_work = 0U;
 	vcpu->state = VCPU_INIT;
 
@@ -392,7 +392,7 @@ int run_vcpu(struct acrn_vcpu *vcpu)
 	uint32_t instlen, cs_attr;
 	uint64_t rip, ia32_efer, cr0;
 	struct run_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 	int64_t status = 0;
 
 	if (bitmap_test_and_clear_lock(CPU_REG_RIP, &vcpu->reg_updated))
@@ -409,8 +409,8 @@ int run_vcpu(struct acrn_vcpu *vcpu)
 		pr_info("VM %d Starting VCPU %hu",
 				vcpu->vm->vm_id, vcpu->vcpu_id);
 
-		if (vcpu->arch_vcpu.vpid)
-			exec_vmwrite16(VMX_VPID, vcpu->arch_vcpu.vpid);
+		if (vcpu->arch.vpid)
+			exec_vmwrite16(VMX_VPID, vcpu->arch.vpid);
 
 		/*
 		 * A power-up or a reset invalidates all linear mappings,
@@ -447,7 +447,7 @@ int run_vcpu(struct acrn_vcpu *vcpu)
 		/* This VCPU was already launched, check if the last guest
 		 * instruction needs to be repeated and resume VCPU accordingly
 		 */
-		instlen = vcpu->arch_vcpu.inst_len;
+		instlen = vcpu->arch.inst_len;
 		rip = vcpu_get_rip(vcpu);
 		exec_vmwrite(VMX_GUEST_RIP, ((rip+(uint64_t)instlen) &
 				0xFFFFFFFFFFFFFFFFUL));
@@ -467,17 +467,17 @@ int run_vcpu(struct acrn_vcpu *vcpu)
 	set_vcpu_mode(vcpu, cs_attr, ia32_efer, cr0);
 
 	/* Obtain current VCPU instruction length */
-	vcpu->arch_vcpu.inst_len = exec_vmread32(VMX_EXIT_INSTR_LEN);
+	vcpu->arch.inst_len = exec_vmread32(VMX_EXIT_INSTR_LEN);
 
 	ctx->guest_cpu_regs.regs.rsp = exec_vmread(VMX_GUEST_RSP);
 
 	/* Obtain VM exit reason */
-	vcpu->arch_vcpu.exit_reason = exec_vmread32(VMX_EXIT_REASON);
+	vcpu->arch.exit_reason = exec_vmread32(VMX_EXIT_REASON);
 
 	if (status != 0) {
 		/* refer to 64-ia32 spec section 24.9.1 volume#3 */
-		if (vcpu->arch_vcpu.exit_reason & VMX_VMENTRY_FAIL)
-			pr_fatal("vmentry fail reason=%lx", vcpu->arch_vcpu.exit_reason);
+		if (vcpu->arch.exit_reason & VMX_VMENTRY_FAIL)
+			pr_fatal("vmentry fail reason=%lx", vcpu->arch.exit_reason);
 		else
 			pr_fatal("vmexit fail err_inst=%x", exec_vmread32(VMX_INSTR_ERROR));
 
@@ -525,20 +525,20 @@ void reset_vcpu(struct acrn_vcpu *vcpu)
 	vcpu->launched = false;
 	vcpu->paused_cnt = 0U;
 	vcpu->running = 0;
-	vcpu->arch_vcpu.nr_sipi = 0;
+	vcpu->arch.nr_sipi = 0;
 	vcpu->pending_pre_work = 0U;
 
-	vcpu->arch_vcpu.exception_info.exception = VECTOR_INVALID;
-	vcpu->arch_vcpu.cur_context = NORMAL_WORLD;
-	vcpu->arch_vcpu.irq_window_enabled = 0;
-	vcpu->arch_vcpu.inject_event_pending = false;
-	(void)memset(vcpu->arch_vcpu.vmcs, 0U, CPU_PAGE_SIZE);
+	vcpu->arch.exception_info.exception = VECTOR_INVALID;
+	vcpu->arch.cur_context = NORMAL_WORLD;
+	vcpu->arch.irq_window_enabled = 0;
+	vcpu->arch.inject_event_pending = false;
+	(void)memset(vcpu->arch.vmcs, 0U, CPU_PAGE_SIZE);
 
 	for (i = 0; i < NR_WORLD; i++) {
-		(void)memset(&vcpu->arch_vcpu.contexts[i], 0U,
+		(void)memset(&vcpu->arch.contexts[i], 0U,
 			sizeof(struct run_context));
 	}
-	vcpu->arch_vcpu.cur_context = NORMAL_WORLD;
+	vcpu->arch.cur_context = NORMAL_WORLD;
 
 	vlapic = vcpu_vlapic(vcpu);
 	vlapic_reset(vlapic);
@@ -499,7 +499,7 @@ vlapic_set_intr_ready(struct acrn_vlapic *vlapic, uint32_t vector, bool level)
 	 * it to vCPU in next vmentry.
 	 */
 	bitmap_set_lock(ACRN_REQUEST_EVENT,
-		&vlapic->vcpu->arch_vcpu.pending_req);
+		&vlapic->vcpu->arch.pending_req);
 	vlapic_post_intr(vlapic->vcpu->pcpu_id);
 	return 0;
 }
@@ -564,7 +564,7 @@ uint64_t apicv_get_pir_desc_paddr(struct acrn_vcpu *vcpu)
 {
 	struct acrn_vlapic *vlapic;
 
-	vlapic = &vcpu->arch_vcpu.vlapic;
+	vlapic = &vcpu->arch.vlapic;
 	return hva2hpa(&(vlapic->pir_desc));
 }
 
@@ -1189,11 +1189,11 @@ vlapic_process_init_sipi(struct acrn_vcpu* target_vcpu, uint32_t mode,
 		 * the second SIPI will be ignored as it move out of
 		 * wait-for-SIPI state.
 		 */
-		target_vcpu->arch_vcpu.nr_sipi = 1U;
+		target_vcpu->arch.nr_sipi = 1U;
 	} else if (mode == APIC_DELMODE_STARTUP) {
 		/* Ignore SIPIs in any state other than wait-for-SIPI */
 		if ((target_vcpu->state != VCPU_INIT) ||
-			(target_vcpu->arch_vcpu.nr_sipi == 0U)) {
+			(target_vcpu->arch.nr_sipi == 0U)) {
 			return;
 		}
 
@@ -1202,8 +1202,8 @@ vlapic_process_init_sipi(struct acrn_vcpu* target_vcpu, uint32_t mode,
 			target_vcpu->vcpu_id, vcpu_id,
 			(icr_low & APIC_VECTOR_MASK));
 
-		target_vcpu->arch_vcpu.nr_sipi--;
-		if (target_vcpu->arch_vcpu.nr_sipi > 0U) {
+		target_vcpu->arch.nr_sipi--;
+		if (target_vcpu->arch.nr_sipi > 0U) {
 			return;
 		}
 
@@ -2238,8 +2238,8 @@ vlapic_wrmsr(struct acrn_vcpu *vcpu, uint32_t msr, uint64_t wval)
 
 int vlapic_create(struct acrn_vcpu *vcpu)
 {
-	vcpu->arch_vcpu.vlapic.vm = vcpu->vm;
-	vcpu->arch_vcpu.vlapic.vcpu = vcpu;
+	vcpu->arch.vlapic.vm = vcpu->vm;
+	vcpu->arch.vlapic.vcpu = vcpu;
 
 	if (is_vcpu_bsp(vcpu)) {
 		uint64_t *pml4_page =
@@ -2450,7 +2450,7 @@ int apic_access_vmexit_handler(struct acrn_vcpu *vcpu)
 	struct acrn_vlapic *vlapic;
 	struct mmio_request *mmio = &vcpu->req.reqs.mmio;
 
-	qual = vcpu->arch_vcpu.exit_qualification;
+	qual = vcpu->arch.exit_qualification;
 	access_type = apic_access_type(qual);
 
 	/*parse offset if linear access*/
@@ -2500,7 +2500,7 @@ int veoi_vmexit_handler(struct acrn_vcpu *vcpu)
 
 	vlapic = vcpu_vlapic(vcpu);
 	lapic = &(vlapic->apic_page);
-	vector = (uint32_t)(vcpu->arch_vcpu.exit_qualification & 0xFFUL);
+	vector = (uint32_t)(vcpu->arch.exit_qualification & 0xFFUL);
 
 	tmrptr = &lapic->tmr[0];
 	idx = vector >> 5U;
@@ -2535,7 +2535,7 @@ int apic_write_vmexit_handler(struct acrn_vcpu *vcpu)
 	uint32_t offset;
 	struct acrn_vlapic *vlapic = NULL;
 
-	qual = vcpu->arch_vcpu.exit_qualification;
+	qual = vcpu->arch.exit_qualification;
 	offset = (uint32_t)(qual & 0xFFFUL);
 
 	handled = 1;
@@ -261,7 +261,7 @@ int rdmsr_vmexit_handler(struct acrn_vcpu *vcpu)
 	}
 	case MSR_IA32_TSC_AUX:
 	{
-		v = vcpu->arch_vcpu.msr_tsc_aux;
+		v = vcpu->arch.msr_tsc_aux;
 		break;
 	}
 	case MSR_IA32_APIC_BASE:
@@ -398,7 +398,7 @@ int wrmsr_vmexit_handler(struct acrn_vcpu *vcpu)
 	}
 	case MSR_IA32_TSC_AUX:
 	{
-		vcpu->arch_vcpu.msr_tsc_aux = v;
+		vcpu->arch.msr_tsc_aux = v;
 		break;
 	}
 	case MSR_IA32_APIC_BASE:
@@ -361,11 +361,11 @@ int32_t pio_instr_vmexit_handler(struct acrn_vcpu *vcpu)
 {
 	int32_t status;
 	uint64_t exit_qual;
-	int32_t cur_context_idx = vcpu->arch_vcpu.cur_context;
+	int32_t cur_context_idx = vcpu->arch.cur_context;
 	struct io_request *io_req = &vcpu->req;
 	struct pio_request *pio_req = &io_req->reqs.pio;
 
-	exit_qual = vcpu->arch_vcpu.exit_qualification;
+	exit_qual = vcpu->arch.exit_qualification;
 
 	io_req->type = REQ_PORTIO;
 	pio_req->size = vm_exit_io_instruction_size(exit_qual) + 1UL;
@@ -293,17 +293,17 @@ static void copy_smc_param(const struct run_context *prev_ctx,
 
 void switch_world(struct acrn_vcpu *vcpu, int next_world)
 {
-	struct vcpu_arch *arch_vcpu = &vcpu->arch_vcpu;
+	struct acrn_vcpu_arch *arch = &vcpu->arch;
 
 	/* save previous world context */
-	save_world_ctx(vcpu, &arch_vcpu->contexts[!next_world].ext_ctx);
+	save_world_ctx(vcpu, &arch->contexts[!next_world].ext_ctx);
 
 	/* load next world context */
-	load_world_ctx(vcpu, &arch_vcpu->contexts[next_world].ext_ctx);
+	load_world_ctx(vcpu, &arch->contexts[next_world].ext_ctx);
 
 	/* Copy SMC parameters: RDI, RSI, RDX, RBX */
-	copy_smc_param(&arch_vcpu->contexts[!next_world].run_ctx,
-		&arch_vcpu->contexts[next_world].run_ctx);
+	copy_smc_param(&arch->contexts[!next_world].run_ctx,
+		&arch->contexts[next_world].run_ctx);
 
 	if (next_world == NORMAL_WORLD) {
 		/* load EPTP for next world */
@@ -321,7 +321,7 @@ void switch_world(struct acrn_vcpu *vcpu, int next_world)
 	}
 
 	/* Update world index */
-	arch_vcpu->cur_context = next_world;
+	arch->cur_context = next_world;
 }
 
 /* Put key_info and trusty_startup_param in the first Page of Trusty
@@ -370,7 +370,7 @@ static bool setup_trusty_info(struct acrn_vcpu *vcpu,
 	 * address(GPA) of startup_param on boot. Currently, the startup_param
	 * is put in the first page of trusty memory just followed by key_info.
 	 */
-	vcpu->arch_vcpu.contexts[SECURE_WORLD].run_ctx.guest_cpu_regs.regs.rdi
+	vcpu->arch.contexts[SECURE_WORLD].run_ctx.guest_cpu_regs.regs.rdi
 		= (uint64_t)TRUSTY_EPT_REBASE_GPA + sizeof(struct trusty_key_info);
 
 	return true;
@@ -386,14 +386,14 @@ static bool init_secure_world_env(struct acrn_vcpu *vcpu,
 				uint64_t base_hpa,
 				uint32_t size)
 {
-	vcpu->arch_vcpu.inst_len = 0U;
-	vcpu->arch_vcpu.contexts[SECURE_WORLD].run_ctx.rip = entry_gpa;
-	vcpu->arch_vcpu.contexts[SECURE_WORLD].run_ctx.guest_cpu_regs.regs.rsp =
+	vcpu->arch.inst_len = 0U;
+	vcpu->arch.contexts[SECURE_WORLD].run_ctx.rip = entry_gpa;
+	vcpu->arch.contexts[SECURE_WORLD].run_ctx.guest_cpu_regs.regs.rsp =
 		TRUSTY_EPT_REBASE_GPA + size;
 
-	vcpu->arch_vcpu.contexts[SECURE_WORLD].ext_ctx.tsc_offset = 0UL;
-	vcpu->arch_vcpu.contexts[SECURE_WORLD].ext_ctx.ia32_pat =
-		vcpu->arch_vcpu.contexts[NORMAL_WORLD].ext_ctx.ia32_pat;
+	vcpu->arch.contexts[SECURE_WORLD].ext_ctx.tsc_offset = 0UL;
+	vcpu->arch.contexts[SECURE_WORLD].ext_ctx.ia32_pat =
+		vcpu->arch.contexts[NORMAL_WORLD].ext_ctx.ia32_pat;
 
 	return setup_trusty_info(vcpu, size, base_hpa);
 }
@@ -444,7 +444,7 @@ bool initialize_trusty(struct acrn_vcpu *vcpu, uint64_t param)
 		hva2hpa(vm->arch_vm.sworld_eptp) | (3UL << 3U) | 0x6UL);
 
 	/* save Normal World context */
-	save_world_ctx(vcpu, &vcpu->arch_vcpu.contexts[NORMAL_WORLD].ext_ctx);
+	save_world_ctx(vcpu, &vcpu->arch.contexts[NORMAL_WORLD].ext_ctx);
 
 	/* init secure world environment */
 	if (init_secure_world_env(vcpu,
@@ -452,7 +452,7 @@ bool initialize_trusty(struct acrn_vcpu *vcpu, uint64_t param)
 		trusty_base_hpa, trusty_mem_size)) {
 
 		/* switch to Secure World */
-		vcpu->arch_vcpu.cur_context = SECURE_WORLD;
+		vcpu->arch.cur_context = SECURE_WORLD;
 		return true;
 	}
 
@@ -481,7 +481,7 @@ void save_sworld_context(struct acrn_vcpu *vcpu)
 {
 	(void)memcpy_s(&vcpu->vm->sworld_snapshot,
 		sizeof(struct cpu_context),
-		&vcpu->arch_vcpu.contexts[SECURE_WORLD],
+		&vcpu->arch.contexts[SECURE_WORLD],
 		sizeof(struct cpu_context));
 }
 
@@ -495,7 +495,7 @@ void restore_sworld_context(struct acrn_vcpu *vcpu)
 		sworld_ctl->sworld_memory.length,
 		TRUSTY_EPT_REBASE_GPA);
 
-	(void)memcpy_s(&vcpu->arch_vcpu.contexts[SECURE_WORLD],
+	(void)memcpy_s(&vcpu->arch.contexts[SECURE_WORLD],
 		sizeof(struct cpu_context),
 		&vcpu->vm->sworld_snapshot,
 		sizeof(struct cpu_context));
@@ -88,12 +88,12 @@ static bool vcpu_pending_request(struct acrn_vcpu *vcpu)
 		vcpu_make_request(vcpu, ACRN_REQUEST_EVENT);
 	}
 
-	return vcpu->arch_vcpu.pending_req != 0UL;
+	return vcpu->arch.pending_req != 0UL;
 }
 
 void vcpu_make_request(struct acrn_vcpu *vcpu, uint16_t eventid)
 {
-	bitmap_set_lock(eventid, &vcpu->arch_vcpu.pending_req);
+	bitmap_set_lock(eventid, &vcpu->arch.pending_req);
 	/*
 	 * if current hostcpu is not the target vcpu's hostcpu, we need
 	 * to invoke IPI to wake up target vcpu
@@ -207,7 +207,7 @@ static int get_excep_class(uint32_t vector)
 
 int vcpu_queue_exception(struct acrn_vcpu *vcpu, uint32_t vector, uint32_t err_code)
 {
-	struct vcpu_arch *arch_vcpu = &vcpu->arch_vcpu;
+	struct acrn_vcpu_arch *arch = &vcpu->arch;
 	/* VECTOR_INVALID is also greater than 32 */
 	if (vector >= 32U) {
 		pr_err("invalid exception vector %d", vector);
@@ -215,7 +215,7 @@ int vcpu_queue_exception(struct acrn_vcpu *vcpu, uint32_t vector, uint32_t err_c
 	}
 
 	uint32_t prev_vector =
-		arch_vcpu->exception_info.exception;
+		arch->exception_info.exception;
 	int32_t new_class, prev_class;
 
 	/* SDM vol3 - 6.15, Table 6-5 - conditions for generating a
@@ -239,12 +239,12 @@ int vcpu_queue_exception(struct acrn_vcpu *vcpu, uint32_t vector, uint32_t err_c
 		 * double/triple fault. */
 	}
 
-	arch_vcpu->exception_info.exception = vector;
+	arch->exception_info.exception = vector;
 
 	if ((exception_type[vector] & EXCEPTION_ERROR_CODE_VALID) != 0U) {
-		arch_vcpu->exception_info.error = err_code;
+		arch->exception_info.error = err_code;
 	} else {
-		arch_vcpu->exception_info.error = 0U;
+		arch->exception_info.error = 0U;
 	}
 
 	return 0;
@@ -254,13 +254,13 @@ static void vcpu_inject_exception(struct acrn_vcpu *vcpu, uint32_t vector)
 {
 	if ((exception_type[vector] & EXCEPTION_ERROR_CODE_VALID) != 0U) {
 		exec_vmwrite32(VMX_ENTRY_EXCEPTION_ERROR_CODE,
-			vcpu->arch_vcpu.exception_info.error);
+			vcpu->arch.exception_info.error);
 	}
 
 	exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, VMX_INT_INFO_VALID |
 		(exception_type[vector] << 8U) | (vector & 0xFFU));
 
-	vcpu->arch_vcpu.exception_info.exception = VECTOR_INVALID;
+	vcpu->arch.exception_info.exception = VECTOR_INVALID;
 
 	/* retain rip for exception injection */
 	vcpu_retain_rip(vcpu);
@@ -268,7 +268,7 @@ static void vcpu_inject_exception(struct acrn_vcpu *vcpu, uint32_t vector)
 
 static int vcpu_inject_hi_exception(struct acrn_vcpu *vcpu)
 {
-	uint32_t vector = vcpu->arch_vcpu.exception_info.exception;
+	uint32_t vector = vcpu->arch.exception_info.exception;
 
 	if (vector == IDT_MC || vector == IDT_BP || vector == IDT_DB) {
 		vcpu_inject_exception(vcpu, vector);
@@ -280,7 +280,7 @@ static int vcpu_inject_hi_exception(struct acrn_vcpu *vcpu)
 
 static int vcpu_inject_lo_exception(struct acrn_vcpu *vcpu)
 {
-	uint32_t vector = vcpu->arch_vcpu.exception_info.exception;
+	uint32_t vector = vcpu->arch.exception_info.exception;
 
 	/* high priority exception already be injected */
 	if (vector <= NR_MAX_VECTOR) {
@@ -348,7 +348,7 @@ int interrupt_window_vmexit_handler(struct acrn_vcpu *vcpu)
 	/* Disable interrupt-window exiting first.
 	 * acrn_handle_pending_request will continue handle for this vcpu
 	 */
-	vcpu->arch_vcpu.irq_window_enabled = 0U;
+	vcpu->arch.irq_window_enabled = 0U;
 	value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);
 	value32 &= ~(VMX_PROCBASED_CTLS_IRQ_WIN);
 	exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, value32);
@@ -395,8 +395,8 @@ int acrn_handle_pending_request(struct acrn_vcpu *vcpu)
 	uint32_t tmp;
 	uint32_t intr_info;
 	uint32_t error_code;
-	struct vcpu_arch * arch_vcpu = &vcpu->arch_vcpu;
-	uint64_t *pending_req_bits = &arch_vcpu->pending_req;
+	struct acrn_vcpu_arch * arch = &vcpu->arch;
+	uint64_t *pending_req_bits = &arch->pending_req;
 	struct acrn_vlapic *vlapic = vcpu_vlapic(vcpu);
 
 	if (bitmap_test_and_clear_lock(ACRN_REQUEST_TRP_FAULT,
@@ -412,7 +412,7 @@ int acrn_handle_pending_request(struct acrn_vcpu *vcpu)
 
 	if (bitmap_test_and_clear_lock(ACRN_REQUEST_VPID_FLUSH,
 		pending_req_bits)) {
-		flush_vpid_single(arch_vcpu->vpid);
+		flush_vpid_single(arch->vpid);
 	}
 
 	if (bitmap_test_and_clear_lock(ACRN_REQUEST_TMR_UPDATE,
@@ -421,18 +421,18 @@ int acrn_handle_pending_request(struct acrn_vcpu *vcpu)
 	}
 
 	/* handling cancelled event injection when vcpu is switched out */
-	if (arch_vcpu->inject_event_pending) {
-		if ((arch_vcpu->inject_info.intr_info &
+	if (arch->inject_event_pending) {
+		if ((arch->inject_info.intr_info &
 			(EXCEPTION_ERROR_CODE_VALID << 8U)) != 0U) {
-			error_code = arch_vcpu->inject_info.error_code;
+			error_code = arch->inject_info.error_code;
 			exec_vmwrite32(VMX_ENTRY_EXCEPTION_ERROR_CODE,
 				error_code);
 		}
 
-		intr_info = arch_vcpu->inject_info.intr_info;
+		intr_info = arch->inject_info.intr_info;
 		exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, intr_info);
 
-		arch_vcpu->inject_event_pending = false;
+		arch->inject_event_pending = false;
 		goto INTR_WIN;
 	}
 
@@ -477,9 +477,9 @@ int acrn_handle_pending_request(struct acrn_vcpu *vcpu)
 	 * - external interrupt, if IF clear, will keep in IDT_VEC_INFO_FIELD
	 * at next vm exit?
 	 */
-	if ((arch_vcpu->idt_vectoring_info & VMX_INT_INFO_VALID) != 0U) {
+	if ((arch->idt_vectoring_info & VMX_INT_INFO_VALID) != 0U) {
 		exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD,
-			arch_vcpu->idt_vectoring_info);
+			arch->idt_vectoring_info);
 		goto INTR_WIN;
 	}
 
@@ -525,7 +525,7 @@ INTR_WIN:
 	 * an ExtInt or there is lapic interrupt and virtual interrupt
	 * deliver is disabled.
 	 */
-	if (arch_vcpu->irq_window_enabled == 1U) {
+	if (arch->irq_window_enabled == 1U) {
 		return ret;
 	}
 
@@ -539,7 +539,7 @@ INTR_WIN:
 	tmp = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);
 	tmp |= VMX_PROCBASED_CTLS_IRQ_WIN;
 	exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, tmp);
-	arch_vcpu->irq_window_enabled = 1U;
+	arch->irq_window_enabled = 1U;
 
 	return ret;
 }
@@ -557,14 +557,14 @@ void cancel_event_injection(struct acrn_vcpu *vcpu)
 	 * call.
 	 */
 	if ((intinfo & VMX_INT_INFO_VALID) != 0U) {
-		vcpu->arch_vcpu.inject_event_pending = true;
+		vcpu->arch.inject_event_pending = true;
 
 		if ((intinfo & (EXCEPTION_ERROR_CODE_VALID << 8U)) != 0U) {
-			vcpu->arch_vcpu.inject_info.error_code =
+			vcpu->arch.inject_info.error_code =
 				exec_vmread32(VMX_ENTRY_EXCEPTION_ERROR_CODE);
 		}
 
-		vcpu->arch_vcpu.inject_info.intr_info = intinfo;
+		vcpu->arch.inject_info.intr_info = intinfo;
 		exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, 0U);
 	}
 }
@@ -163,11 +163,11 @@ int vmexit_handler(struct acrn_vcpu *vcpu)
 	}
 
 	/* Obtain interrupt info */
-	vcpu->arch_vcpu.idt_vectoring_info =
+	vcpu->arch.idt_vectoring_info =
 		exec_vmread32(VMX_IDT_VEC_INFO_FIELD);
 	/* Filter out HW exception & NMI */
-	if ((vcpu->arch_vcpu.idt_vectoring_info & VMX_INT_INFO_VALID) != 0U) {
-		uint32_t vector_info = vcpu->arch_vcpu.idt_vectoring_info;
+	if ((vcpu->arch.idt_vectoring_info & VMX_INT_INFO_VALID) != 0U) {
+		uint32_t vector_info = vcpu->arch.idt_vectoring_info;
 		uint32_t vector = vector_info & 0xffU;
 		uint32_t type = (vector_info & VMX_INT_TYPE_MASK) >> 8U;
 		uint32_t err_code = 0U;
@@ -177,25 +177,25 @@ int vmexit_handler(struct acrn_vcpu *vcpu)
 				err_code = exec_vmread32(VMX_IDT_VEC_ERROR_CODE);
 			}
 			(void)vcpu_queue_exception(vcpu, vector, err_code);
-			vcpu->arch_vcpu.idt_vectoring_info = 0U;
+			vcpu->arch.idt_vectoring_info = 0U;
 		} else if (type == VMX_INT_TYPE_NMI) {
 			vcpu_make_request(vcpu, ACRN_REQUEST_NMI);
-			vcpu->arch_vcpu.idt_vectoring_info = 0U;
+			vcpu->arch.idt_vectoring_info = 0U;
 		} else {
 			/* No action on EXT_INT or SW exception. */
 		}
 	}
 
 	/* Calculate basic exit reason (low 16-bits) */
-	basic_exit_reason = (uint16_t)(vcpu->arch_vcpu.exit_reason & 0xFFFFU);
+	basic_exit_reason = (uint16_t)(vcpu->arch.exit_reason & 0xFFFFU);
 
 	/* Log details for exit */
-	pr_dbg("Exit Reason: 0x%016llx ", vcpu->arch_vcpu.exit_reason);
+	pr_dbg("Exit Reason: 0x%016llx ", vcpu->arch.exit_reason);
 
 	/* Ensure exit reason is within dispatch table */
 	if (basic_exit_reason >= ARRAY_SIZE(dispatch_table)) {
 		pr_err("Invalid Exit Reason: 0x%016llx ",
-			vcpu->arch_vcpu.exit_reason);
+			vcpu->arch.exit_reason);
 		return -EINVAL;
 	}
 
@@ -208,7 +208,7 @@ int vmexit_handler(struct acrn_vcpu *vcpu)
 	 */
 	if (dispatch->need_exit_qualification != 0U) {
 		/* Get exit qualification */
-		vcpu->arch_vcpu.exit_qualification =
+		vcpu->arch.exit_qualification =
 			exec_vmread(VMX_EXIT_QUALIFICATION);
 	}
 
@@ -232,12 +232,12 @@ static int unhandled_vmexit_handler(struct acrn_vcpu *vcpu)
 	pr_fatal("Error: Unhandled VM exit condition from guest at 0x%016llx ",
 		exec_vmread(VMX_GUEST_RIP));
 
-	pr_fatal("Exit Reason: 0x%016llx ", vcpu->arch_vcpu.exit_reason);
+	pr_fatal("Exit Reason: 0x%016llx ", vcpu->arch.exit_reason);
 
 	pr_err("Exit qualification: 0x%016llx ",
 		exec_vmread(VMX_EXIT_QUALIFICATION));
 
-	TRACE_2L(TRACE_VMEXIT_UNHANDLED, vcpu->arch_vcpu.exit_reason, 0UL);
+	TRACE_2L(TRACE_VMEXIT_UNHANDLED, vcpu->arch.exit_reason, 0UL);
 
 	return 0;
 }
@@ -268,7 +268,7 @@ int cr_access_vmexit_handler(struct acrn_vcpu *vcpu)
 	uint32_t idx;
 	uint64_t exit_qual;
 
-	exit_qual = vcpu->arch_vcpu.exit_qualification;
+	exit_qual = vcpu->arch.exit_qualification;
 	idx = (uint32_t)vm_exit_cr_access_reg_idx(exit_qual);
 
 	ASSERT((idx <= 15U), "index out of range");
@@ -328,7 +328,7 @@ static int xsetbv_vmexit_handler(struct acrn_vcpu *vcpu)
 		return 0;
 	}
 
-	idx = vcpu->arch_vcpu.cur_context;
+	idx = vcpu->arch.cur_context;
 	if (idx >= NR_WORLD) {
 		return -1;
 	}
@@ -102,7 +102,7 @@ void exec_vmxon_instr(uint16_t pcpu_id)
 	vmxon_region_pa = hva2hpa(vmxon_region_va);
 	exec_vmxon(&vmxon_region_pa);
 
-	vmcs_pa = hva2hpa(vcpu->arch_vcpu.vmcs);
+	vmcs_pa = hva2hpa(vcpu->arch.vmcs);
 	exec_vmptrld(&vmcs_pa);
 }
 
@@ -112,7 +112,7 @@ void vmx_off(uint16_t pcpu_id)
 	struct acrn_vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
 	uint64_t vmcs_pa;
 
-	vmcs_pa = hva2hpa(vcpu->arch_vcpu.vmcs);
+	vmcs_pa = hva2hpa(vcpu->arch.vmcs);
 	exec_vmclear((void *)&vmcs_pa);
 
 	asm volatile ("vmxoff" : : : "memory");
@@ -549,7 +549,7 @@ static void init_guest_vmx(struct acrn_vcpu *vcpu, uint64_t cr0, uint64_t cr3,
 		uint64_t cr4)
 {
 	struct cpu_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
+		&vcpu->arch.contexts[vcpu->arch.cur_context];
 	struct ext_context *ectx = &ctx->ext_ctx;
 
 	vcpu_set_cr4(vcpu, cr4);
@@ -593,7 +593,7 @@ static void init_guest_vmx(struct acrn_vcpu *vcpu, uint64_t cr0, uint64_t cr3,
 static void init_guest_state(struct acrn_vcpu *vcpu)
 {
 	struct cpu_context *ctx =
-		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
+		&vcpu->arch.contexts[vcpu->arch.cur_context];
 
 	init_guest_vmx(vcpu, ctx->run_ctx.cr0, ctx->ext_ctx.cr3,
 		ctx->run_ctx.cr4 & ~CR4_VMXE);
@@ -825,7 +825,7 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
 		VMX_PROCBASED_CTLS2_UNRESTRICT|
 		VMX_PROCBASED_CTLS2_VAPIC_REGS);
 
-	if (vcpu->arch_vcpu.vpid != 0U) {
+	if (vcpu->arch.vpid != 0U) {
 		value32 |= VMX_PROCBASED_CTLS2_VPID;
 	} else {
 		value32 &= ~VMX_PROCBASED_CTLS2_VPID;
@@ -1038,10 +1038,10 @@ void init_vmcs(struct acrn_vcpu *vcpu)
 
 	/* Obtain the VM Rev ID from HW and populate VMCS page with it */
 	vmx_rev_id = msr_read(MSR_IA32_VMX_BASIC);
-	(void)memcpy_s(vcpu->arch_vcpu.vmcs, 4U, (void *)&vmx_rev_id, 4U);
+	(void)memcpy_s(vcpu->arch.vmcs, 4U, (void *)&vmx_rev_id, 4U);
 
 	/* Execute VMCLEAR on current VMCS */
-	vmcs_pa = hva2hpa(vcpu->arch_vcpu.vmcs);
+	vmcs_pa = hva2hpa(vcpu->arch.vmcs);
 	exec_vmclear((void *)&vmcs_pa);
 
 	/* Load VMCS pointer */
@@ -89,7 +89,7 @@ void vcpu_thread(struct acrn_vcpu *vcpu)
 		vmexit_begin = rdtsc();
 #endif
 
-		vcpu->arch_vcpu.nrexits++;
+		vcpu->arch.nrexits++;
 		/* Save guest TSC_AUX */
 		cpu_msr_read(MSR_IA32_TSC_AUX, &vcpu->msr_tsc_aux_guest);
 		/* Restore native TSC_AUX */
@@ -98,7 +98,7 @@ void vcpu_thread(struct acrn_vcpu *vcpu)
 		CPU_IRQ_ENABLE();
 		/* Dispatch handler */
 		ret = vmexit_handler(vcpu);
-		basic_exit_reason = vcpu->arch_vcpu.exit_reason & 0xFFFFU;
+		basic_exit_reason = vcpu->arch.exit_reason & 0xFFFFU;
 		if (ret < 0) {
 			pr_fatal("dispatch VM exit handler failed for reason"
 				" %d, ret = %d!", basic_exit_reason, ret);
@@ -14,7 +14,7 @@
 */
 int32_t hcall_world_switch(struct acrn_vcpu *vcpu)
 {
-	int32_t next_world_id = !(vcpu->arch_vcpu.cur_context);
+	int32_t next_world_id = !(vcpu->arch.cur_context);
 
 	if (next_world_id >= NR_WORLD) {
 		dev_dbg(ACRN_DBG_TRUSTY_HYCALL,
@@ -56,7 +56,7 @@ int32_t hcall_initialize_trusty(struct acrn_vcpu *vcpu, uint64_t param)
 		return -EPERM;
 	}
 
-	if (vcpu->arch_vcpu.cur_context != NORMAL_WORLD) {
+	if (vcpu->arch.cur_context != NORMAL_WORLD) {
 		dev_dbg(ACRN_DBG_TRUSTY_HYCALL,
 			"%s, must initialize Trusty from Normal World!\n",
 			__func__);
@@ -121,7 +121,7 @@ int general_sw_loader(struct vm *vm)
 	/* calculate the kernel entry point */
 	zeropage = (struct zero_page *)sw_kernel->kernel_src_addr;
 	kernel_entry_offset = (uint32_t)(zeropage->hdr.setup_sects + 1U) * 512U;
-	if (vcpu->arch_vcpu.cpu_mode == CPU_MODE_64BIT) {
+	if (vcpu->arch.cpu_mode == CPU_MODE_64BIT) {
 		/* 64bit entry is the 512bytes after the start */
 		kernel_entry_offset += 512U;
 	}
@@ -62,7 +62,7 @@ static void dump_guest_reg(struct acrn_vcpu *vcpu)
 	printf("= VM ID %d ==== vCPU ID %hu === pCPU ID %d ===="
 		"world %d =============\r\n",
 		vcpu->vm->vm_id, vcpu->vcpu_id, vcpu->pcpu_id,
-		vcpu->arch_vcpu.cur_context);
+		vcpu->arch.cur_context);
 	printf("= RIP=0x%016llx RSP=0x%016llx "
 		"RFLAGS=0x%016llx\r\n",
 		vcpu_get_rip(vcpu),
@@ -176,7 +176,7 @@ struct cpu_context {
 	struct ext_context ext_ctx;
 };
 
-struct vcpu_arch {
+struct acrn_vcpu_arch {
 	/* vmcs region for this vcpu, MUST be 4KB-aligned */
 	uint8_t vmcs[CPU_PAGE_SIZE];
 	/* per vcpu lapic */
@@ -222,7 +222,7 @@ struct vcpu_arch {
 struct vm;
 struct acrn_vcpu {
 	/* Architecture specific definitions for this VCPU */
-	struct vcpu_arch arch_vcpu;
+	struct acrn_vcpu_arch arch;
 	uint16_t pcpu_id;	/* Physical CPU ID of this VCPU */
 	uint16_t vcpu_id;	/* virtual identifier for VCPU */
 	struct vm *vm;		/* Reference to the VM this VCPU belongs to */
@@ -272,13 +272,13 @@ static inline bool is_vcpu_bsp(const struct acrn_vcpu *vcpu)
 /* do not update Guest RIP for next VM Enter */
 static inline void vcpu_retain_rip(struct acrn_vcpu *vcpu)
 {
-	(vcpu)->arch_vcpu.inst_len = 0U;
+	(vcpu)->arch.inst_len = 0U;
}
 
 static inline struct acrn_vlapic *
 vcpu_vlapic(struct acrn_vcpu *vcpu)
 {
-	return &(vcpu->arch_vcpu.vlapic);
+	return &(vcpu->arch.vlapic);
 }
 
 /* External Interfaces */
@@ -468,7 +468,7 @@ void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu);
 
 static inline enum vm_cpu_mode get_vcpu_mode(const struct acrn_vcpu *vcpu)
 {
-	return vcpu->arch_vcpu.cpu_mode;
+	return vcpu->arch.cpu_mode;
 }
 
 static inline bool cpu_has_vmx_unrestricted_guest_cap(void)