trusty: Simulate Secure Monitor Call(SMC) by Hypercall
For ARM, the SMC instruction is used to generate a synchronous exception that is handled by Secure Monitor code running in EL3. In the ARM architecture, synchronous control is transferred between the Non-secure state and the Secure state through Secure Monitor Call exceptions. SMC exceptions are generated by the SMC instruction and handled by the Secure Monitor; the operation of the Secure Monitor is determined by the parameters passed in through registers.

For ACRN, the hypervisor simulates SMC with a hypercall to switch the vCPU state between the Normal World and the Secure World. Four registers (RDI, RSI, RDX, RBX) are reserved for passing parameters between the Normal World and the Secure World.

Signed-off-by: Qi Yadong <yadong.qi@intel.com>
This commit is contained in:
parent 0d9d628d9a
commit 1fd07ba349
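As context for the change below, here is a minimal, hypothetical sketch of how a Normal World guest driver might issue this SMC-style world switch. It is not part of this commit: the wrapper name is invented, and it assumes ACRN's vmcall convention of passing the hypercall ID in R8 and returning the result in RAX; only HC_WORLD_SWITCH and the use of RDI, RSI, RDX and RBX as SMC parameter registers come from the patch itself.

/*
 * Hypothetical guest-side helper (not part of this patch) that plays the
 * role of an ARM SMC call on x86: it asks the hypervisor to switch this
 * vCPU to the other world, passing the SMC-style arguments in the four
 * registers reserved by this commit (RDI, RSI, RDX, RBX).
 *
 * Assumptions: the hypercall ID is read from R8, the result comes back in
 * RAX, and HC_WORLD_SWITCH is visible to the guest via the hypervisor's
 * public header (acrn_hv_defs.h).
 */
static inline long acrn_world_switch(unsigned long smc_fid, unsigned long a1,
                                     unsigned long a2, unsigned long a3)
{
        register unsigned long r8 asm("r8") = HC_WORLD_SWITCH;
        long ret;

        asm volatile("vmcall"
                     : "=a" (ret)                       /* result in RAX */
                     : "r" (r8),                        /* hypercall ID  */
                       "D" (smc_fid), "S" (a1),         /* RDI, RSI      */
                       "d" (a2), "b" (a3)               /* RDX, RBX      */
                     : "memory");
        return ret;
}

On the hypervisor side, the diff below routes such a vmcall through the new HC_WORLD_SWITCH case in vmcall_handler() into hcall_world_switch(), which saves the current world's context and loads the other world's.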
@@ -134,6 +134,7 @@ C_SRCS += lib/crypto/hkdf.c
C_SRCS += lib/sprintf.c
C_SRCS += common/hv_main.c
C_SRCS += common/hypercall.c
C_SRCS += common/trusty_hypercall.c
C_SRCS += common/schedule.c
C_SRCS += common/vm_load.c
@@ -134,6 +134,10 @@ int vmcall_handler(struct vcpu *vcpu)
                ret = hcall_setup_sbuf(vm, param1);
                break;

        case HC_WORLD_SWITCH:
                ret = hcall_world_switch(vcpu);
                break;

        default:
                pr_err("op %d: Invalid hypercall\n", hypcall_id);
                ret = -1;
@@ -31,6 +31,29 @@
#include <hv_lib.h>
#include <acrn_common.h>
#include <hv_arch.h>
#include <acrn_hv_defs.h>
#include <hv_debug.h>

_Static_assert(NR_WORLD == 2, "Only 2 Worlds supported!");

/* Trusty EPT rebase gpa: 511G */
#define TRUSTY_EPT_REBASE_GPA (511ULL*1024ULL*1024ULL*1024ULL)

#define save_segment(seg, SEG_NAME) \
{ \
        seg.selector = exec_vmread(VMX_GUEST_##SEG_NAME##_SEL); \
        seg.base = exec_vmread(VMX_GUEST_##SEG_NAME##_BASE); \
        seg.limit = exec_vmread(VMX_GUEST_##SEG_NAME##_LIMIT); \
        seg.attr = exec_vmread(VMX_GUEST_##SEG_NAME##_ATTR); \
}

#define load_segment(seg, SEG_NAME) \
{ \
        exec_vmwrite(VMX_GUEST_##SEG_NAME##_SEL, seg.selector); \
        exec_vmwrite(VMX_GUEST_##SEG_NAME##_BASE, seg.base); \
        exec_vmwrite(VMX_GUEST_##SEG_NAME##_LIMIT, seg.limit); \
        exec_vmwrite(VMX_GUEST_##SEG_NAME##_ATTR, seg.attr); \
}

void create_secure_world_ept(struct vm *vm, uint64_t gpa,
                uint64_t size, uint64_t rebased_gpa)
@@ -97,3 +120,122 @@ void create_secure_world_ept(struct vm *vm, uint64_t gpa,

}

static void save_world_ctx(struct run_context *context)
{
        /* VMCS Execution field */
        context->tsc_offset = exec_vmread64(VMX_TSC_OFFSET_FULL);

        /* VMCS GUEST field */
        /* CR3, RIP, RSP, RFLAGS already saved on VMEXIT */
        context->cr0 = exec_vmread(VMX_GUEST_CR0);
        context->cr4 = exec_vmread(VMX_GUEST_CR4);
        context->dr7 = exec_vmread(VMX_GUEST_DR7);
        context->ia32_debugctl = exec_vmread64(VMX_GUEST_IA32_DEBUGCTL_FULL);
        context->ia32_pat = exec_vmread64(VMX_GUEST_IA32_PAT_FULL);
        context->ia32_efer = exec_vmread64(VMX_GUEST_IA32_EFER_FULL);
        context->ia32_sysenter_cs = exec_vmread(VMX_GUEST_IA32_SYSENTER_CS);
        context->ia32_sysenter_esp = exec_vmread(VMX_GUEST_IA32_SYSENTER_ESP);
        context->ia32_sysenter_eip = exec_vmread(VMX_GUEST_IA32_SYSENTER_EIP);
        save_segment(context->cs, CS);
        save_segment(context->ss, SS);
        save_segment(context->ds, DS);
        save_segment(context->es, ES);
        save_segment(context->fs, FS);
        save_segment(context->gs, GS);
        save_segment(context->tr, TR);
        save_segment(context->ldtr, LDTR);
        /* Only base and limit for IDTR and GDTR */
        context->idtr.base = exec_vmread(VMX_GUEST_IDTR_BASE);
        context->idtr.limit = exec_vmread(VMX_GUEST_IDTR_LIMIT);
        context->gdtr.base = exec_vmread(VMX_GUEST_GDTR_BASE);
        context->gdtr.limit = exec_vmread(VMX_GUEST_GDTR_LIMIT);

        /* MSRs which not in the VMCS */
        context->ia32_star = msr_read(MSR_IA32_STAR);
        context->ia32_lstar = msr_read(MSR_IA32_LSTAR);
        context->ia32_fmask = msr_read(MSR_IA32_FMASK);
        context->ia32_kernel_gs_base = msr_read(MSR_IA32_KERNEL_GS_BASE);

        /* FX area */
        asm volatile("fxsave (%0)"
                        : : "r" (context->fxstore_guest_area) : "memory");
}

static void load_world_ctx(struct run_context *context)
{
        /* VMCS Execution field */
        exec_vmwrite64(VMX_TSC_OFFSET_FULL, context->tsc_offset);

        /* VMCS GUEST field */
        exec_vmwrite(VMX_GUEST_CR0, context->cr0);
        exec_vmwrite(VMX_GUEST_CR3, context->cr3);
        exec_vmwrite(VMX_GUEST_CR4, context->cr4);
        exec_vmwrite(VMX_GUEST_RIP, context->rip);
        exec_vmwrite(VMX_GUEST_RSP, context->rsp);
        exec_vmwrite(VMX_GUEST_RFLAGS, context->rflags);
        exec_vmwrite(VMX_GUEST_DR7, context->dr7);
        exec_vmwrite64(VMX_GUEST_IA32_DEBUGCTL_FULL, context->ia32_debugctl);
        exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, context->ia32_pat);
        exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, context->ia32_efer);
        exec_vmwrite(VMX_GUEST_IA32_SYSENTER_CS, context->ia32_sysenter_cs);
        exec_vmwrite(VMX_GUEST_IA32_SYSENTER_ESP, context->ia32_sysenter_esp);
        exec_vmwrite(VMX_GUEST_IA32_SYSENTER_EIP, context->ia32_sysenter_eip);
        load_segment(context->cs, CS);
        load_segment(context->ss, SS);
        load_segment(context->ds, DS);
        load_segment(context->es, ES);
        load_segment(context->fs, FS);
        load_segment(context->gs, GS);
        load_segment(context->tr, TR);
        load_segment(context->ldtr, LDTR);
        /* Only base and limit for IDTR and GDTR */
        exec_vmwrite(VMX_GUEST_IDTR_BASE, context->idtr.base);
        exec_vmwrite(VMX_GUEST_IDTR_LIMIT, context->idtr.limit);
        exec_vmwrite(VMX_GUEST_GDTR_BASE, context->gdtr.base);
        exec_vmwrite(VMX_GUEST_GDTR_LIMIT, context->gdtr.limit);

        /* MSRs which not in the VMCS */
        msr_write(MSR_IA32_STAR, context->ia32_star);
        msr_write(MSR_IA32_LSTAR, context->ia32_lstar);
        msr_write(MSR_IA32_FMASK, context->ia32_fmask);
        msr_write(MSR_IA32_KERNEL_GS_BASE, context->ia32_kernel_gs_base);

        /* FX area */
        asm volatile("fxrstor (%0)" : : "r" (context->fxstore_guest_area));
}

static void copy_smc_param(struct run_context *prev_ctx,
                        struct run_context *next_ctx)
{
        next_ctx->guest_cpu_regs.regs.rdi = prev_ctx->guest_cpu_regs.regs.rdi;
        next_ctx->guest_cpu_regs.regs.rsi = prev_ctx->guest_cpu_regs.regs.rsi;
        next_ctx->guest_cpu_regs.regs.rdx = prev_ctx->guest_cpu_regs.regs.rdx;
        next_ctx->guest_cpu_regs.regs.rbx = prev_ctx->guest_cpu_regs.regs.rbx;
}

void switch_world(struct vcpu *vcpu, int next_world)
{
        struct vcpu_arch *arch_vcpu = &vcpu->arch_vcpu;

        /* save previous world context */
        save_world_ctx(&arch_vcpu->contexts[!next_world]);

        /* load next world context */
        load_world_ctx(&arch_vcpu->contexts[next_world]);

        /* Copy SMC parameters: RDI, RSI, RDX, RBX */
        copy_smc_param(&arch_vcpu->contexts[!next_world],
                        &arch_vcpu->contexts[next_world]);

        /* load EPTP for next world */
        if (next_world == NORMAL_WORLD) {
                exec_vmwrite64(VMX_EPT_POINTER_FULL,
                        ((uint64_t)vcpu->vm->arch_vm.nworld_eptp) | (3<<3) | 6);
        } else {
                exec_vmwrite64(VMX_EPT_POINTER_FULL,
                        ((uint64_t)vcpu->vm->arch_vm.sworld_eptp) | (3<<3) | 6);
        }

        /* Update world index */
        arch_vcpu->cur_context = next_world;
}
@@ -40,6 +40,18 @@

#define ACRN_DBG_HYCALL 6

bool is_hypercall_from_ring0(void)
{
        uint64_t cs_sel;

        cs_sel = exec_vmread(VMX_GUEST_CS_SEL);
        /* cs_selector[1:0] is CPL */
        if ((cs_sel & 0x3) == 0)
                return true;

        return false;
}

int64_t hcall_get_api_version(struct vm *vm, uint64_t param)
{
        struct hc_api_version version;
hypervisor/common/trusty_hypercall.c (new file, 63 lines)
@@ -0,0 +1,63 @@
/*
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <hypervisor.h>
#include <hv_lib.h>
#include <acrn_common.h>
#include <hv_arch.h>
#include <hypercall.h>
#include <acrn_hv_defs.h>
#include <hv_debug.h>

int64_t hcall_world_switch(struct vcpu *vcpu)
{
        int next_world_id = !(vcpu->arch_vcpu.cur_context);

        if (!is_hypercall_from_ring0()) {
                pr_err("%s() is only allowed from RING-0!\n", __func__);
                return -1;
        }

        if (!vcpu->vm->sworld_control.sworld_enabled) {
                pr_err("Secure World is not enabled!\n");
                return -1;
        }

        if (!vcpu->vm->arch_vm.sworld_eptp) {
                pr_err("Trusty is not launched!\n");
                return -1;
        }

        ASSERT(next_world_id < NR_WORLD,
                "world_id exceed max number of Worlds");

        switch_world(vcpu, next_world_id);
        return 0;
}
@@ -128,5 +128,7 @@ struct secure_world_control {
        struct secure_world_memory sworld_memory;
};

void switch_world(struct vcpu *vcpu, int next_world);

#endif /* TRUSTY_H_ */
@@ -39,6 +39,7 @@

struct vhm_request;

bool is_hypercall_from_ring0(void);
int acrn_insert_request_wait(struct vcpu *vcpu, struct vhm_request *req);
int acrn_insert_request_nowait(struct vcpu *vcpu, struct vhm_request *req);
int get_req_info(char *str, int str_max);
@@ -325,6 +326,15 @@ int64_t hcall_reset_ptdev_intr_info(struct vm *vm, uint64_t vmid,
 */
int64_t hcall_setup_sbuf(struct vm *vm, uint64_t param);

/**
 * @brief Switch VCPU state between Normal/Secure World.
 *
 * @param VCPU Pointer to VCPU data structure
 *
 * @return 0 on success, non-zero on error.
 */
int64_t hcall_world_switch(struct vcpu *vcpu);

/**
 * @}
 */
@@ -229,4 +229,5 @@ struct hc_api_version {
/**
 * @}
 */

#endif /* ACRN_HV_DEFS_H */