Mirror of https://github.com/projectacrn/acrn-hypervisor.git
hv: move down structures run_context and ext_context
The structures run_context and ext_context are currently defined in vcpu.h, but they are also used by a lower-layer module (wakeup.S). This patch moves the structures down from vcpu.h to cpu.h to avoid the reversed dependency.

Tracked-On: #1842
Signed-off-by: Mingqiang Chi <mingqiang.chi@intel.com>
Reviewed-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
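For orientation, here is a minimal sketch of the types this commit touches, reconstructed only from the hunks below rather than from the real vcpu.h/cpu.h contents; the union name gp_regs_view, the trimmed member lists, and the placeholder acrn_gp_regs definition are assumptions for illustration.

#include <stdint.h>

/* Trimmed to the registers the hunks below actually reference. */
struct acrn_gp_regs {
	uint64_t rdi, rsi, rdx, rbx, rsp;
	/* ... remaining general-purpose registers ... */
};

/* Hypothetical union name; the diff shows both .regs.<name> and .longs[reg] access. */
union gp_regs_view {
	struct acrn_gp_regs regs;                                        /* named access */
	uint64_t longs[sizeof(struct acrn_gp_regs) / sizeof(uint64_t)];  /* indexed access */
};

/* Moved from vcpu.h down to cpu.h by this patch. */
struct run_context {
	union gp_regs_view cpu_regs;  /* renamed from guest_cpu_regs in these hunks */
	uint64_t rip;
	uint64_t cr0;
	uint64_t cr4;
	uint64_t ia32_efer;
	/* ... */
};

/* Moved from vcpu.h down to cpu.h by this patch. */
struct ext_context {
	uint64_t cr3;
	uint64_t tsc_offset;
	/* segment attributes such as ldtr.attr / tr.attr also live here */
};

/* Renamed from struct cpu_context; one instance per guest world. */
struct guest_cpu_context {
	struct run_context run_ctx;
	struct ext_context ext_ctx;
};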
Committed by: ACRN System Integration
Parent: 4f98cb03a7
Commit: 60adef33d3
@@ -268,10 +268,10 @@ static void load_world_ctx(struct acrn_vcpu *vcpu, const struct ext_context *ext
 static void copy_smc_param(const struct run_context *prev_ctx,
 			struct run_context *next_ctx)
 {
-	next_ctx->guest_cpu_regs.regs.rdi = prev_ctx->guest_cpu_regs.regs.rdi;
-	next_ctx->guest_cpu_regs.regs.rsi = prev_ctx->guest_cpu_regs.regs.rsi;
-	next_ctx->guest_cpu_regs.regs.rdx = prev_ctx->guest_cpu_regs.regs.rdx;
-	next_ctx->guest_cpu_regs.regs.rbx = prev_ctx->guest_cpu_regs.regs.rbx;
+	next_ctx->cpu_regs.regs.rdi = prev_ctx->cpu_regs.regs.rdi;
+	next_ctx->cpu_regs.regs.rsi = prev_ctx->cpu_regs.regs.rsi;
+	next_ctx->cpu_regs.regs.rdx = prev_ctx->cpu_regs.regs.rdx;
+	next_ctx->cpu_regs.regs.rbx = prev_ctx->cpu_regs.regs.rbx;
 }

 void switch_world(struct acrn_vcpu *vcpu, int32_t next_world)
@@ -344,7 +344,7 @@ static bool setup_trusty_info(struct acrn_vcpu *vcpu, uint32_t mem_size, uint64_
	 * address(GPA) of startup_param on boot. Currently, the startup_param
	 * is put in the first page of trusty memory just followed by key_info.
	 */
-	vcpu->arch.contexts[SECURE_WORLD].run_ctx.guest_cpu_regs.regs.rdi
+	vcpu->arch.contexts[SECURE_WORLD].run_ctx.cpu_regs.regs.rdi
		= (uint64_t)TRUSTY_EPT_REBASE_GPA + sizeof(struct trusty_key_info);

	stac();
@@ -378,7 +378,7 @@ static bool init_secure_world_env(struct acrn_vcpu *vcpu,

	vcpu->arch.inst_len = 0U;
	vcpu->arch.contexts[SECURE_WORLD].run_ctx.rip = entry_gpa;
-	vcpu->arch.contexts[SECURE_WORLD].run_ctx.guest_cpu_regs.regs.rsp =
+	vcpu->arch.contexts[SECURE_WORLD].run_ctx.cpu_regs.regs.rsp =
		TRUSTY_EPT_REBASE_GPA + size;

	vcpu->arch.contexts[SECURE_WORLD].ext_ctx.tsc_offset = 0UL;
@@ -453,10 +453,8 @@ bool initialize_trusty(struct acrn_vcpu *vcpu, struct trusty_boot_param *boot_pa

 void save_sworld_context(struct acrn_vcpu *vcpu)
 {
-	(void)memcpy_s(&vcpu->vm->sworld_snapshot,
-			sizeof(struct cpu_context),
-			&vcpu->arch.contexts[SECURE_WORLD],
-			sizeof(struct cpu_context));
+	(void)memcpy_s((void *)&vcpu->vm->sworld_snapshot, sizeof(struct guest_cpu_context),
+		(void *)&vcpu->arch.contexts[SECURE_WORLD], sizeof(struct guest_cpu_context));
 }

 void restore_sworld_context(struct acrn_vcpu *vcpu)
@@ -469,10 +467,8 @@ void restore_sworld_context(struct acrn_vcpu *vcpu)
		sworld_ctl->sworld_memory.length,
		TRUSTY_EPT_REBASE_GPA);

-	(void)memcpy_s(&vcpu->arch.contexts[SECURE_WORLD],
-			sizeof(struct cpu_context),
-			&vcpu->vm->sworld_snapshot,
-			sizeof(struct cpu_context));
+	(void)memcpy_s((void *)&vcpu->arch.contexts[SECURE_WORLD], sizeof(struct guest_cpu_context),
+		(void *)&vcpu->vm->sworld_snapshot, sizeof(struct guest_cpu_context));
 }

 /**
@@ -38,7 +38,7 @@ uint64_t vcpu_get_gpreg(const struct acrn_vcpu *vcpu, uint32_t reg)
	const struct run_context *ctx =
		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

-	return ctx->guest_cpu_regs.longs[reg];
+	return ctx->cpu_regs.longs[reg];
 }

 void vcpu_set_gpreg(struct acrn_vcpu *vcpu, uint32_t reg, uint64_t val)
@@ -46,7 +46,7 @@ void vcpu_set_gpreg(struct acrn_vcpu *vcpu, uint32_t reg, uint64_t val)
	struct run_context *ctx =
		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

-	ctx->guest_cpu_regs.longs[reg] = val;
+	ctx->cpu_regs.longs[reg] = val;
 }

 uint64_t vcpu_get_rip(struct acrn_vcpu *vcpu)
@@ -72,7 +72,7 @@ uint64_t vcpu_get_rsp(const struct acrn_vcpu *vcpu)
	const struct run_context *ctx =
		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

-	return ctx->guest_cpu_regs.regs.rsp;
+	return ctx->cpu_regs.regs.rsp;
 }

 void vcpu_set_rsp(struct acrn_vcpu *vcpu, uint64_t val)
@@ -80,7 +80,7 @@ void vcpu_set_rsp(struct acrn_vcpu *vcpu, uint64_t val)
	struct run_context *ctx =
		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

-	ctx->guest_cpu_regs.regs.rsp = val;
+	ctx->cpu_regs.regs.rsp = val;
	bitmap_set_lock(CPU_REG_RSP, &vcpu->reg_updated);
 }

@@ -274,7 +274,7 @@ void set_vcpu_regs(struct acrn_vcpu *vcpu, struct acrn_vcpu_regs *vcpu_regs)
	ectx->ldtr.attr = LDTR_AR;
	ectx->tr.attr = TR_AR;

-	(void)memcpy_s((void *)&(ctx->guest_cpu_regs), sizeof(struct acrn_gp_regs),
+	(void)memcpy_s((void *)&(ctx->cpu_regs), sizeof(struct acrn_gp_regs),
		(void *)&(vcpu_regs->gprs), sizeof(struct acrn_gp_regs));

	vcpu_set_rip(vcpu, vcpu_regs->rip);
@@ -478,7 +478,7 @@ int32_t run_vcpu(struct acrn_vcpu *vcpu)
		exec_vmwrite(VMX_GUEST_RIP, ctx->rip);
	}
	if (bitmap_test_and_clear_lock(CPU_REG_RSP, &vcpu->reg_updated)) {
-		exec_vmwrite(VMX_GUEST_RSP, ctx->guest_cpu_regs.regs.rsp);
+		exec_vmwrite(VMX_GUEST_RSP, ctx->cpu_regs.regs.rsp);
	}
	if (bitmap_test_and_clear_lock(CPU_REG_EFER, &vcpu->reg_updated)) {
		exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, ctx->ia32_efer);
@@ -573,7 +573,7 @@ int32_t run_vcpu(struct acrn_vcpu *vcpu)
	/* Obtain current VCPU instruction length */
	vcpu->arch.inst_len = exec_vmread32(VMX_EXIT_INSTR_LEN);

-	ctx->guest_cpu_regs.regs.rsp = exec_vmread(VMX_GUEST_RSP);
+	ctx->cpu_regs.regs.rsp = exec_vmread(VMX_GUEST_RSP);

	/* Obtain VM exit reason */
	vcpu->arch.exit_reason = exec_vmread32(VMX_EXIT_REASON);
@@ -24,8 +24,7 @@
 static void init_guest_vmx(struct acrn_vcpu *vcpu, uint64_t cr0, uint64_t cr3,
			uint64_t cr4)
 {
-	struct cpu_context *ctx =
-		&vcpu->arch.contexts[vcpu->arch.cur_context];
+	struct guest_cpu_context *ctx = &vcpu->arch.contexts[vcpu->arch.cur_context];
	struct ext_context *ectx = &ctx->ext_ctx;

	vcpu_set_cr4(vcpu, cr4);
@@ -71,8 +70,7 @@ static void init_guest_vmx(struct acrn_vcpu *vcpu, uint64_t cr0, uint64_t cr3,

 static void init_guest_state(struct acrn_vcpu *vcpu)
 {
-	struct cpu_context *ctx =
-		&vcpu->arch.contexts[vcpu->arch.cur_context];
+	struct guest_cpu_context *ctx = &vcpu->arch.contexts[vcpu->arch.cur_context];

	init_guest_vmx(vcpu, ctx->run_ctx.cr0, ctx->ext_ctx.cr3,
			ctx->run_ctx.cr4 & ~(CR4_VMXE | CR4_SMXE | CR4_MCE));
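A closing note on scope: guest general-purpose registers are normally read and written through the vcpu_get_gpreg()/vcpu_set_gpreg()/vcpu_get_rsp()/vcpu_set_rsp() accessors patched above, so the guest_cpu_regs to cpu_regs rename stays confined to those helpers and to the vCPU/world-switch internals shown in this diff. A hypothetical caller, assuming the in-tree accessor declarations are in scope and that CPU_REG_RDI is an index in the same register enum as the CPU_REG_RSP/CPU_REG_EFER values seen in the hunks:

/* Illustrative only: callers like this are unaffected by the member rename. */
static void bump_guest_rdi(struct acrn_vcpu *vcpu)
{
	uint64_t rdi = vcpu_get_gpreg(vcpu, CPU_REG_RDI);

	/* Lands in ctx->cpu_regs.longs[CPU_REG_RDI] after this patch. */
	vcpu_set_gpreg(vcpu, CPU_REG_RDI, rdi + 1UL);
}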