Mirror of https://github.com/projectacrn/acrn-hypervisor.git
trusty: init & switch world fix
- at init, CR0 & CR4 should be read from the VMCS
- on a world switch, the CR0/CR4 read shadows should also be saved/restored

v2:
- use context->vmx_cr0/cr4 to save/restore VMX_GUEST_CR0/CR4
- use context->cr0/cr4 to save/restore VMX_CR0/CR4_READ_SHADOW

Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
commit e84d4dee19 (parent 75c1573aff)
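For context: under VMX, a guest read of CR0/CR4 returns the read-shadow value for any bit the hypervisor owns via the CR0/CR4 guest/host mask, and the VMX_GUEST_CR0/CR4 value for the remaining bits; the two can legitimately differ, which is why the read shadow is separate state that must be saved and restored across a world switch. A small standalone sketch of that read semantics (the mask and register values are made-up examples for illustration, not taken from this patch):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	/* What a guest sees when it reads CR0 under VMX: for bits set in the
	 * CR0 guest/host mask the read shadow is returned; for all other bits
	 * the real guest CR0 (VMX_GUEST_CR0) is returned. */
	static uint64_t guest_visible_cr0(uint64_t guest_cr0, uint64_t cr0_mask,
					  uint64_t cr0_read_shadow)
	{
		return (guest_cr0 & ~cr0_mask) | (cr0_read_shadow & cr0_mask);
	}

	int main(void)
	{
		/* Example numbers only: the hypervisor keeps CR0.NE (bit 5) set
		 * in hardware, as VMX requires, while the guest believes it is
		 * clear. */
		uint64_t mask      = UINT64_C(1) << 5;            /* host-owned bit */
		uint64_t shadow    = UINT64_C(0x80000011);        /* guest's view   */
		uint64_t guest_cr0 = shadow | mask;               /* value in force */

		printf("guest reads CR0 = %#" PRIx64 "\n",
		       guest_visible_cr0(guest_cr0, mask, shadow));
		return 0;
	}

Run as written, the example prints 0x80000011: the guest never observes the NE bit the hypervisor forces on, because that bit is covered by the mask and served from the shadow.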
@@ -177,8 +177,10 @@ static void save_world_ctx(struct run_context *context)
 {
 	/* VMCS GUEST field */
 	/* TSC_OFFSET, CR3, RIP, RSP, RFLAGS already saved on VMEXIT */
-	context->cr0 = exec_vmread(VMX_GUEST_CR0);
-	context->cr4 = exec_vmread(VMX_GUEST_CR4);
+	context->cr0 = exec_vmread(VMX_CR0_READ_SHADOW);
+	context->cr4 = exec_vmread(VMX_CR4_READ_SHADOW);
+	context->vmx_cr0 = exec_vmread(VMX_GUEST_CR0);
+	context->vmx_cr4 = exec_vmread(VMX_GUEST_CR4);
 	context->dr7 = exec_vmread(VMX_GUEST_DR7);
 	context->ia32_debugctl = exec_vmread64(VMX_GUEST_IA32_DEBUGCTL_FULL);
 	context->ia32_pat = exec_vmread64(VMX_GUEST_IA32_PAT_FULL);
@@ -217,9 +219,11 @@ static void load_world_ctx(struct run_context *context)
 	exec_vmwrite64(VMX_TSC_OFFSET_FULL, context->tsc_offset);

 	/* VMCS GUEST field */
-	exec_vmwrite(VMX_GUEST_CR0, context->cr0);
+	exec_vmwrite(VMX_CR0_READ_SHADOW, context->cr0);
 	exec_vmwrite(VMX_GUEST_CR3, context->cr3);
-	exec_vmwrite(VMX_GUEST_CR4, context->cr4);
+	exec_vmwrite(VMX_CR4_READ_SHADOW, context->cr4);
+	exec_vmwrite(VMX_GUEST_CR0, context->vmx_cr0);
+	exec_vmwrite(VMX_GUEST_CR4, context->vmx_cr4);
 	exec_vmwrite(VMX_GUEST_RIP, context->rip);
 	exec_vmwrite(VMX_GUEST_RSP, context->rsp);
 	exec_vmwrite(VMX_GUEST_RFLAGS, context->rflags);
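Taken together, the two hunks above make the save and load paths symmetric. A condensed sketch of that pairing, assuming the exec_vmread()/exec_vmwrite() accessors and VMX_* field encodings from the ACRN sources plus a struct run_context that carries the cr0/cr4/vmx_cr0/vmx_cr4 members used in this patch:

	/* Condensed sketch of the CR0/CR4 handling after this patch; not the
	 * complete save_world_ctx()/load_world_ctx(). */
	static void save_cr_regs(struct run_context *ctx)
	{
		ctx->cr0     = exec_vmread(VMX_CR0_READ_SHADOW); /* guest-visible */
		ctx->cr4     = exec_vmread(VMX_CR4_READ_SHADOW); /* guest-visible */
		ctx->vmx_cr0 = exec_vmread(VMX_GUEST_CR0);       /* value in force */
		ctx->vmx_cr4 = exec_vmread(VMX_GUEST_CR4);       /* value in force */
	}

	static void load_cr_regs(const struct run_context *ctx)
	{
		exec_vmwrite(VMX_CR0_READ_SHADOW, ctx->cr0);
		exec_vmwrite(VMX_CR4_READ_SHADOW, ctx->cr4);
		exec_vmwrite(VMX_GUEST_CR0, ctx->vmx_cr0);
		exec_vmwrite(VMX_GUEST_CR4, ctx->vmx_cr4);
	}

With this pairing, each VMCS field has exactly one context member carrying it, so a save/load round trip is lossless; before the patch the read shadows were never carried across a world switch at all.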
@@ -358,9 +362,17 @@ static bool init_secure_world_env(struct vcpu *vcpu,
 	vcpu->arch_vcpu.contexts[SECURE_WORLD].tsc_offset = 0;

 	vcpu->arch_vcpu.contexts[SECURE_WORLD].cr0 =
-		vcpu->arch_vcpu.contexts[NORMAL_WORLD].cr0;
+	vcpu->arch_vcpu.contexts[NORMAL_WORLD].cr0 =
+		exec_vmread(VMX_CR0_READ_SHADOW);
 	vcpu->arch_vcpu.contexts[SECURE_WORLD].cr4 =
-		vcpu->arch_vcpu.contexts[NORMAL_WORLD].cr4;
+	vcpu->arch_vcpu.contexts[NORMAL_WORLD].cr4 =
+		exec_vmread(VMX_CR4_READ_SHADOW);
+	vcpu->arch_vcpu.contexts[SECURE_WORLD].vmx_cr0 =
+	vcpu->arch_vcpu.contexts[NORMAL_WORLD].vmx_cr0 =
+		exec_vmread(VMX_GUEST_CR0);
+	vcpu->arch_vcpu.contexts[SECURE_WORLD].vmx_cr4 =
+	vcpu->arch_vcpu.contexts[NORMAL_WORLD].vmx_cr4 =
+		exec_vmread(VMX_GUEST_CR4);

 	exec_vmwrite(VMX_GUEST_RSP,
 		TRUSTY_EPT_REBASE_GPA + size);
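The chained assignments in this hunk seed both worlds' contexts from the live VMCS at trusty initialization, so the first switch in either direction restores consistent values. Written out long-hand, and using contexts[] as shorthand for vcpu->arch_vcpu.contexts[] together with the same exec_vmread() accessor, the effect is the sketch below:

	/* Long-hand equivalent of the chained assignments (sketch only;
	 * contexts[] stands in for vcpu->arch_vcpu.contexts[]). */
	uint64_t shadow_cr0 = exec_vmread(VMX_CR0_READ_SHADOW);
	uint64_t shadow_cr4 = exec_vmread(VMX_CR4_READ_SHADOW);
	uint64_t hw_cr0     = exec_vmread(VMX_GUEST_CR0);
	uint64_t hw_cr4     = exec_vmread(VMX_GUEST_CR4);

	contexts[SECURE_WORLD].cr0     = shadow_cr0; /* guest-visible CR0 */
	contexts[NORMAL_WORLD].cr0     = shadow_cr0;
	contexts[SECURE_WORLD].cr4     = shadow_cr4; /* guest-visible CR4 */
	contexts[NORMAL_WORLD].cr4     = shadow_cr4;
	contexts[SECURE_WORLD].vmx_cr0 = hw_cr0;     /* CR0 in force */
	contexts[NORMAL_WORLD].vmx_cr0 = hw_cr0;
	contexts[SECURE_WORLD].vmx_cr4 = hw_cr4;     /* CR4 in force */
	contexts[NORMAL_WORLD].vmx_cr4 = hw_cr4;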
@@ -7,6 +7,9 @@
 #include <hypervisor.h>
 #include <hypercall.h>

+/* this hcall is only come from trusty enabled vcpu itself, and cannot be
+ * called from other vcpus
+ */
 int64_t hcall_world_switch(struct vcpu *vcpu)
 {
 	int next_world_id = !(vcpu->arch_vcpu.cur_context);
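One of the context lines above, int next_world_id = !(vcpu->arch_vcpu.cur_context);, picks the target world by logical negation: with exactly two contexts, NOT maps each world id to the other. A tiny standalone illustration (the 0/1 values assigned to NORMAL_WORLD/SECURE_WORLD here are assumptions made for the example, not quoted from this patch):

	#include <stdio.h>

	enum world_id { NORMAL_WORLD = 0, SECURE_WORLD = 1 }; /* assumed values */

	int main(void)
	{
		int cur_context = NORMAL_WORLD;

		/* !(cur_context) maps 0 -> 1 and 1 -> 0, i.e. "the other world". */
		int next_world_id = !(cur_context);
		printf("switching from %d to %d\n", cur_context, next_world_id);

		cur_context = next_world_id;
		next_world_id = !(cur_context);
		printf("switching from %d to %d\n", cur_context, next_world_id);
		return 0;
	}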
@@ -31,6 +34,9 @@ int64_t hcall_world_switch(struct vcpu *vcpu)
 	return 0;
 }

+/* this hcall is only come from trusty enabled vcpu itself, and cannot be
+ * called from other vcpus
+ */
 int64_t hcall_initialize_trusty(struct vcpu *vcpu, uint64_t param)
 {
 	if (!vcpu->vm->sworld_control.sworld_enabled) {
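The comments added in these two hunks document a calling restriction (the hypercall must come from the trusty-enabled vcpu itself) without enforcing it. Purely as an illustration of what an enforcement could look like, here is a hedged sketch; get_current_vcpu() is a hypothetical helper invented for this sketch and the error code is illustrative, neither is taken from this patch:

	/* Hypothetical guard, for illustration only: reject the hypercall if
	 * it was not issued by the vcpu whose worlds are being switched. */
	int64_t hcall_world_switch_checked(struct vcpu *vcpu)
	{
		if (vcpu != get_current_vcpu()) /* hypothetical helper */
			return -1;              /* illustrative error code */

		return hcall_world_switch(vcpu);
	}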
@@ -138,6 +138,9 @@ struct run_context {
 	uint64_t ia32_sysenter_eip;
 	uint64_t ia32_debugctl;

+	uint64_t vmx_cr0;
+	uint64_t vmx_cr4;
+
 	/* segment registers */
 	struct segment cs;
 	struct segment ss;