diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c
index a343227a9..13e22e87d 100644
--- a/hypervisor/arch/x86/guest/vcpu.c
+++ b/hypervisor/arch/x86/guest/vcpu.c
@@ -225,21 +225,11 @@ void load_iwkey(struct acrn_vcpu *vcpu)
 	if (pcpu_has_cap(X86_FEATURE_KEYLOCKER) && vcpu->arch.cr4_kl_enabled &&
 	    (get_cpu_var(whose_iwkey) != vcpu)) {
 		/* Save/restore xmm0/xmm1/xmm2 during the process */
-		asm volatile ( "movdqu %%xmm0, %0\n"
-			       "movdqu %%xmm1, %1\n"
-			       "movdqu %%xmm2, %2\n"
-			       "movdqu %3, %%xmm0\n"
-			       "movdqu %4, %%xmm1\n"
-			       "movdqu %5, %%xmm2\n"
-			       : "=m"(xmm_save[0]), "=m"(xmm_save[2]), "=m"(xmm_save[4])
-			       : "m"(vcpu->arch.IWKey.integrity_key[0]),
-				 "m"(vcpu->arch.IWKey.encryption_key[0]),
-				 "m"(vcpu->arch.IWKey.encryption_key[2]));
+		read_xmm_0_2(&xmm_save[0], &xmm_save[2], &xmm_save[4]);
+		write_xmm_0_2(vcpu->arch.IWKey.integrity_key[0], vcpu->arch.IWKey.encryption_key[0],
+			      vcpu->arch.IWKey.encryption_key[2]);
 		asm_loadiwkey(0);
-		asm volatile ( "movdqu %2, %%xmm2\n"
-			       "movdqu %1, %%xmm1\n"
-			       "movdqu %0, %%xmm0\n"
-			       : : "m"(xmm_save[0]), "m"(xmm_save[2]), "m"(xmm_save[4]));
+		write_xmm_0_2(xmm_save[0], xmm_save[2], xmm_save[4]);
 		get_cpu_var(whose_iwkey) = vcpu;
 	}
 }
diff --git a/hypervisor/arch/x86/guest/vmexit.c b/hypervisor/arch/x86/guest/vmexit.c
index 305daec83..a464e0899 100644
--- a/hypervisor/arch/x86/guest/vmexit.c
+++ b/hypervisor/arch/x86/guest/vmexit.c
@@ -477,10 +477,7 @@ static int32_t loadiwkey_vmexit_handler(struct acrn_vcpu *vcpu)
 	if ((vcpu_get_gpreg(vcpu, CPU_REG_RAX) != 0UL)) {
 		vcpu_inject_gp(vcpu, 0);
 	} else {
-		asm volatile ("movdqu %%xmm0, %0\n"
-			      "movdqu %%xmm1, %1\n"
-			      "movdqu %%xmm2, %2\n"
-			      : : "m"(xmm[0]), "m"(xmm[2]), "m"(xmm[4]));
+		read_xmm_0_2(&xmm[0], &xmm[2], &xmm[4]);
 		vcpu->arch.IWKey.encryption_key[0] = xmm[2];
 		vcpu->arch.IWKey.encryption_key[1] = xmm[3];
 		vcpu->arch.IWKey.encryption_key[2] = xmm[4];
diff --git a/hypervisor/include/arch/x86/asm/cpu.h b/hypervisor/include/arch/x86/asm/cpu.h
index e92216327..e518490eb 100644
--- a/hypervisor/include/arch/x86/asm/cpu.h
+++ b/hypervisor/include/arch/x86/asm/cpu.h
@@ -38,6 +38,7 @@

 #ifndef CPU_H
 #define CPU_H
 #include
+#include
 #include
 #include
@@ -482,6 +483,18 @@ void wait_sync_change(volatile const uint64_t *sync, uint64_t wake_sync);
 			: "r"(value));						\
 }

+#define CPU_XMM_READ(xmm, result_ptr)						\
+{										\
+	asm volatile ("movdqu %%" STRINGIFY(xmm) ", %0": "=m" (*(result_ptr)));	\
+}
+
+#define CPU_XMM_WRITE(xmm, value)						\
+{										\
+	asm volatile ("movdqu %0, %%" STRINGIFY(xmm)				\
+			: /* No output */					\
+			: "m"(value));						\
+}
+
 static inline uint64_t sgdt(void)
 {
 	struct descriptor_table gdtb = {0U, 0UL};
@@ -730,6 +743,20 @@ static inline void asm_loadiwkey(uint32_t eax)
 	asm volatile(".byte 0xf3, 0x0f, 0x38, 0xdc, 0xd1;": : "a" (eax));
 }

+static inline void read_xmm_0_2(uint64_t *xmm0_addr, uint64_t *xmm1_addr, uint64_t *xmm2_addr)
+{
+	CPU_XMM_READ(xmm0, xmm0_addr);
+	CPU_XMM_READ(xmm1, xmm1_addr);
+	CPU_XMM_READ(xmm2, xmm2_addr);
+}
+
+static inline void write_xmm_0_2(uint64_t xmm0_val, uint64_t xmm1_val, uint64_t xmm2_val)
+{
+	CPU_XMM_WRITE(xmm0, xmm0_val);
+	CPU_XMM_WRITE(xmm1, xmm1_val);
+	CPU_XMM_WRITE(xmm2, xmm2_val);
+}
+
 /*
  * stac/clac pair is used to access guest's memory protected by SMAP,
  * following below flow:
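For context: the patch replaces three open-coded movdqu asm blocks with shared CPU_XMM_READ/CPU_XMM_WRITE macros and read_xmm_0_2()/write_xmm_0_2() helpers. LOADIWKEY consumes its key material implicitly from xmm0-xmm2, so load_iwkey() saves the guest's live XMM values, loads the IWKey words into the registers, executes the instruction, and restores the registers. Each XMM register is 128 bits wide, so every helper argument names the first of two adjacent uint64_t slots, which is why the indices step by two (xmm_save[0], [2], [4]).

Below is a minimal stand-alone sketch (not ACRN code) of the same STRINGIFY-based movdqu pattern, for experimenting outside the hypervisor. The xmm_mem_t typedef and the main() harness are illustrative assumptions; unlike the patch, the memory operands here are declared with the full two-element array type so the compiler sees the whole 128-bit footprint rather than only the first uint64_t.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define STRINGIFY(x) #x

/* 128 bits, the width movdqu moves in one instruction; declaring memory
 * operands with this array type tells the compiler the full footprint. */
typedef uint64_t xmm_mem_t[2];

/* Store an XMM register to memory (movdqu tolerates unaligned addresses). */
#define CPU_XMM_READ(xmm, result_ptr)					\
	asm volatile ("movdqu %%" STRINGIFY(xmm) ", %0"			\
		      : "=m" (*(result_ptr)))

/* Load an XMM register from memory. */
#define CPU_XMM_WRITE(xmm, value_ptr)					\
	asm volatile ("movdqu %0, %%" STRINGIFY(xmm)			\
		      : : "m" (*(value_ptr)))

int main(void)
{
	xmm_mem_t in  = { 0x1122334455667788ULL, 0x99aabbccddeeff00ULL };
	xmm_mem_t out = { 0, 0 };

	CPU_XMM_WRITE(xmm0, &in);	/* memory -> %xmm0 */
	CPU_XMM_READ(xmm0, &out);	/* %xmm0  -> memory */

	/* High and low 64-bit halves after the round trip through xmm0. */
	printf("%016" PRIx64 " %016" PRIx64 "\n", out[1], out[0]);
	return 0;
}

Compile with e.g. gcc -O2 demo.c on an x86-64 host; the program prints the 128-bit value after a round trip through xmm0.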