mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-09-20 16:28:10 +00:00
trusty: Simulate Secure Monitor Call(SMC) by Hypercall
On ARM, the SMC instruction generates a synchronous exception that is handled by Secure Monitor code running at EL3. In the ARM architecture, synchronous control is transferred between the normal Non-secure state and the Secure state through Secure Monitor Call exceptions. SMC exceptions are generated by the SMC instruction and handled by the Secure Monitor; the operation of the Secure Monitor is determined by the parameters that are passed in through registers. For ACRN, the hypervisor simulates SMC with a hypercall to switch the vCPU state between Normal World and Secure World. Four registers (RDI, RSI, RDX, RBX) are reserved for parameter passing between Normal World and Secure World. Signed-off-by: Qi Yadong <yadong.qi@intel.com>
This commit is contained in:
@@ -134,6 +134,10 @@ int vmcall_handler(struct vcpu *vcpu)
|
||||
ret = hcall_setup_sbuf(vm, param1);
|
||||
break;
|
||||
|
||||
case HC_WORLD_SWITCH:
|
||||
ret = hcall_world_switch(vcpu);
|
||||
break;
|
||||
|
||||
default:
|
||||
pr_err("op %d: Invalid hypercall\n", hypcall_id);
|
||||
ret = -1;
|
||||
|
@@ -31,6 +31,29 @@
|
||||
#include <hv_lib.h>
|
||||
#include <acrn_common.h>
|
||||
#include <hv_arch.h>
|
||||
#include <acrn_hv_defs.h>
|
||||
#include <hv_debug.h>
|
||||
|
||||
_Static_assert(NR_WORLD == 2, "Only 2 Worlds supported!");
|
||||
|
||||
/* Trusty EPT rebase gpa: 511G */
|
||||
#define TRUSTY_EPT_REBASE_GPA (511ULL*1024ULL*1024ULL*1024ULL)
|
||||
|
||||
/*
 * save_segment(seg, SEG_NAME) - read one guest segment register
 * (selector, base, limit, access rights) out of the active VMCS
 * into the segment descriptor cache @seg.
 *
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement and is safe in un-braced if/else bodies; the previous
 * bare-block form left a dangling ';' after expansion.  The seg
 * argument is parenthesized against precedence surprises.
 */
#define save_segment(seg, SEG_NAME)					\
	do {								\
		(seg).selector = exec_vmread(VMX_GUEST_##SEG_NAME##_SEL); \
		(seg).base = exec_vmread(VMX_GUEST_##SEG_NAME##_BASE);	\
		(seg).limit = exec_vmread(VMX_GUEST_##SEG_NAME##_LIMIT); \
		(seg).attr = exec_vmread(VMX_GUEST_##SEG_NAME##_ATTR);	\
	} while (0)
|
||||
|
||||
/*
 * load_segment(seg, SEG_NAME) - write one guest segment register
 * (selector, base, limit, access rights) from the segment descriptor
 * cache @seg back into the active VMCS.
 *
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement and is safe in un-braced if/else bodies; the previous
 * bare-block form left a dangling ';' after expansion.  The seg
 * argument is parenthesized against precedence surprises.
 */
#define load_segment(seg, SEG_NAME)					\
	do {								\
		exec_vmwrite(VMX_GUEST_##SEG_NAME##_SEL, (seg).selector); \
		exec_vmwrite(VMX_GUEST_##SEG_NAME##_BASE, (seg).base);	\
		exec_vmwrite(VMX_GUEST_##SEG_NAME##_LIMIT, (seg).limit); \
		exec_vmwrite(VMX_GUEST_##SEG_NAME##_ATTR, (seg).attr);	\
	} while (0)
|
||||
|
||||
void create_secure_world_ept(struct vm *vm, uint64_t gpa,
|
||||
uint64_t size, uint64_t rebased_gpa)
|
||||
@@ -97,3 +120,122 @@ void create_secure_world_ept(struct vm *vm, uint64_t gpa,
|
||||
|
||||
}
|
||||
|
||||
/*
 * save_world_ctx() - snapshot the currently-running world's guest state.
 * @context: the per-world context slot to fill.
 *
 * Captures the VMCS fields and MSRs that are NOT automatically
 * preserved across a world switch, plus the FPU/SSE state.  The
 * counterpart is load_world_ctx(), which restores the same field set.
 *
 * NOTE(review): exec_vmread() operates on the active VMCS, so this
 * presumably runs on the physical CPU where this vCPU's VMCS is
 * loaded — confirm against the caller.
 */
static void save_world_ctx(struct run_context *context)
{
	/* VMCS Execution field */
	context->tsc_offset = exec_vmread64(VMX_TSC_OFFSET_FULL);

	/* VMCS GUEST field */
	/* CR3, RIP, RSP, RFLAGS already saved on VMEXIT */
	context->cr0 = exec_vmread(VMX_GUEST_CR0);
	context->cr4 = exec_vmread(VMX_GUEST_CR4);
	context->dr7 = exec_vmread(VMX_GUEST_DR7);
	context->ia32_debugctl = exec_vmread64(VMX_GUEST_IA32_DEBUGCTL_FULL);
	context->ia32_pat = exec_vmread64(VMX_GUEST_IA32_PAT_FULL);
	context->ia32_efer = exec_vmread64(VMX_GUEST_IA32_EFER_FULL);
	context->ia32_sysenter_cs = exec_vmread(VMX_GUEST_IA32_SYSENTER_CS);
	context->ia32_sysenter_esp = exec_vmread(VMX_GUEST_IA32_SYSENTER_ESP);
	context->ia32_sysenter_eip = exec_vmread(VMX_GUEST_IA32_SYSENTER_EIP);
	/* All six user segments plus TR and LDTR (sel/base/limit/attr each) */
	save_segment(context->cs, CS);
	save_segment(context->ss, SS);
	save_segment(context->ds, DS);
	save_segment(context->es, ES);
	save_segment(context->fs, FS);
	save_segment(context->gs, GS);
	save_segment(context->tr, TR);
	save_segment(context->ldtr, LDTR);
	/* Only base and limit for IDTR and GDTR */
	context->idtr.base = exec_vmread(VMX_GUEST_IDTR_BASE);
	context->idtr.limit = exec_vmread(VMX_GUEST_IDTR_LIMIT);
	context->gdtr.base = exec_vmread(VMX_GUEST_GDTR_BASE);
	context->gdtr.limit = exec_vmread(VMX_GUEST_GDTR_LIMIT);

	/* MSRs which not in the VMCS */
	context->ia32_star = msr_read(MSR_IA32_STAR);
	context->ia32_lstar = msr_read(MSR_IA32_LSTAR);
	context->ia32_fmask = msr_read(MSR_IA32_FMASK);
	context->ia32_kernel_gs_base = msr_read(MSR_IA32_KERNEL_GS_BASE);

	/* FX area */
	/*
	 * NOTE(review): FXSAVE requires a 16-byte-aligned 512-byte
	 * operand — presumably fxstore_guest_area is declared with that
	 * alignment; verify in the struct run_context definition.
	 */
	asm volatile("fxsave (%0)"
			: : "r" (context->fxstore_guest_area) : "memory");
}
|
||||
|
||||
/*
 * load_world_ctx() - restore a world's guest state into the active VMCS.
 * @context: the per-world context slot previously filled by
 *           save_world_ctx() (or pre-initialized for the first entry).
 *
 * Mirror of save_world_ctx(): writes back the same VMCS fields, the
 * non-VMCS MSRs, and the FPU/SSE state.  Unlike the save path it also
 * restores CR3/RIP/RSP/RFLAGS, which the save path relies on the
 * VMEXIT machinery to have captured.
 */
static void load_world_ctx(struct run_context *context)
{
	/* VMCS Execution field */
	exec_vmwrite64(VMX_TSC_OFFSET_FULL, context->tsc_offset);

	/* VMCS GUEST field */
	exec_vmwrite(VMX_GUEST_CR0, context->cr0);
	exec_vmwrite(VMX_GUEST_CR3, context->cr3);
	exec_vmwrite(VMX_GUEST_CR4, context->cr4);
	exec_vmwrite(VMX_GUEST_RIP, context->rip);
	exec_vmwrite(VMX_GUEST_RSP, context->rsp);
	exec_vmwrite(VMX_GUEST_RFLAGS, context->rflags);
	exec_vmwrite(VMX_GUEST_DR7, context->dr7);
	exec_vmwrite64(VMX_GUEST_IA32_DEBUGCTL_FULL, context->ia32_debugctl);
	exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, context->ia32_pat);
	exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, context->ia32_efer);
	exec_vmwrite(VMX_GUEST_IA32_SYSENTER_CS, context->ia32_sysenter_cs);
	exec_vmwrite(VMX_GUEST_IA32_SYSENTER_ESP, context->ia32_sysenter_esp);
	exec_vmwrite(VMX_GUEST_IA32_SYSENTER_EIP, context->ia32_sysenter_eip);
	/* All six user segments plus TR and LDTR (sel/base/limit/attr each) */
	load_segment(context->cs, CS);
	load_segment(context->ss, SS);
	load_segment(context->ds, DS);
	load_segment(context->es, ES);
	load_segment(context->fs, FS);
	load_segment(context->gs, GS);
	load_segment(context->tr, TR);
	load_segment(context->ldtr, LDTR);
	/* Only base and limit for IDTR and GDTR */
	exec_vmwrite(VMX_GUEST_IDTR_BASE, context->idtr.base);
	exec_vmwrite(VMX_GUEST_IDTR_LIMIT, context->idtr.limit);
	exec_vmwrite(VMX_GUEST_GDTR_BASE, context->gdtr.base);
	exec_vmwrite(VMX_GUEST_GDTR_LIMIT, context->gdtr.limit);

	/* MSRs which not in the VMCS */
	msr_write(MSR_IA32_STAR, context->ia32_star);
	msr_write(MSR_IA32_LSTAR, context->ia32_lstar);
	msr_write(MSR_IA32_FMASK, context->ia32_fmask);
	msr_write(MSR_IA32_KERNEL_GS_BASE, context->ia32_kernel_gs_base);

	/* FX area */
	/* NOTE(review): FXRSTOR needs the same 16-byte alignment as FXSAVE. */
	asm volatile("fxrstor (%0)" : : "r" (context->fxstore_guest_area));
}
|
||||
|
||||
static void copy_smc_param(struct run_context *prev_ctx,
|
||||
struct run_context *next_ctx)
|
||||
{
|
||||
next_ctx->guest_cpu_regs.regs.rdi = prev_ctx->guest_cpu_regs.regs.rdi;
|
||||
next_ctx->guest_cpu_regs.regs.rsi = prev_ctx->guest_cpu_regs.regs.rsi;
|
||||
next_ctx->guest_cpu_regs.regs.rdx = prev_ctx->guest_cpu_regs.regs.rdx;
|
||||
next_ctx->guest_cpu_regs.regs.rbx = prev_ctx->guest_cpu_regs.regs.rbx;
|
||||
}
|
||||
|
||||
void switch_world(struct vcpu *vcpu, int next_world)
|
||||
{
|
||||
struct vcpu_arch *arch_vcpu = &vcpu->arch_vcpu;
|
||||
|
||||
/* save previous world context */
|
||||
save_world_ctx(&arch_vcpu->contexts[!next_world]);
|
||||
|
||||
/* load next world context */
|
||||
load_world_ctx(&arch_vcpu->contexts[next_world]);
|
||||
|
||||
/* Copy SMC parameters: RDI, RSI, RDX, RBX */
|
||||
copy_smc_param(&arch_vcpu->contexts[!next_world],
|
||||
&arch_vcpu->contexts[next_world]);
|
||||
|
||||
/* load EPTP for next world */
|
||||
if (next_world == NORMAL_WORLD) {
|
||||
exec_vmwrite64(VMX_EPT_POINTER_FULL,
|
||||
((uint64_t)vcpu->vm->arch_vm.nworld_eptp) | (3<<3) | 6);
|
||||
} else {
|
||||
exec_vmwrite64(VMX_EPT_POINTER_FULL,
|
||||
((uint64_t)vcpu->vm->arch_vm.sworld_eptp) | (3<<3) | 6);
|
||||
}
|
||||
|
||||
/* Update world index */
|
||||
arch_vcpu->cur_context = next_world;
|
||||
}
|
||||
|
Reference in New Issue
Block a user