From 599894e57153c91821228061fab253a94db49e88 Mon Sep 17 00:00:00 2001
From: "Jiang, Yanting"
Date: Thu, 28 Apr 2022 14:50:50 +0800
Subject: [PATCH] Fix: write xmm registers correctly

The movdqu instruction moves an unaligned double quadword (128 bits)
into or out of an XMM register. This patch uses pointers as the input
parameters of write_xmm_0_2() so that the full 128-bit value for each
XMM register is read from the 64-bit array.

Tracked-On: #7380
Reviewed-by: Fei Li
Signed-off-by: Jiang, Yanting
---
 hypervisor/arch/x86/guest/vcpu.c      |  6 +++---
 hypervisor/include/arch/x86/asm/cpu.h | 16 ++++++++--------
 2 files changed, 11 insertions(+), 11 deletions(-)
 mode change 100644 => 100755 hypervisor/arch/x86/guest/vcpu.c
 mode change 100644 => 100755 hypervisor/include/arch/x86/asm/cpu.h

diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c
old mode 100644
new mode 100755
index a561cb8bf..efb87caf4
--- a/hypervisor/arch/x86/guest/vcpu.c
+++ b/hypervisor/arch/x86/guest/vcpu.c
@@ -226,10 +226,10 @@ void load_iwkey(struct acrn_vcpu *vcpu)
 	    (get_cpu_var(whose_iwkey) != vcpu)) {
 		/* Save/restore xmm0/xmm1/xmm2 during the process */
 		read_xmm_0_2(&xmm_save[0], &xmm_save[2], &xmm_save[4]);
-		write_xmm_0_2(vcpu->arch.IWKey.integrity_key[0], vcpu->arch.IWKey.encryption_key[0],
-				vcpu->arch.IWKey.encryption_key[2]);
+		write_xmm_0_2(&vcpu->arch.IWKey.integrity_key[0], &vcpu->arch.IWKey.encryption_key[0],
+				&vcpu->arch.IWKey.encryption_key[2]);
 		asm_loadiwkey(0);
-		write_xmm_0_2(xmm_save[0], xmm_save[2], xmm_save[4]);
+		write_xmm_0_2(&xmm_save[0], &xmm_save[2], &xmm_save[4]);
 		get_cpu_var(whose_iwkey) = vcpu;
 	}
 }
diff --git a/hypervisor/include/arch/x86/asm/cpu.h b/hypervisor/include/arch/x86/asm/cpu.h
old mode 100644
new mode 100755
index 5475379c8..d41216aeb
--- a/hypervisor/include/arch/x86/asm/cpu.h
+++ b/hypervisor/include/arch/x86/asm/cpu.h
@@ -482,16 +482,16 @@ void wait_sync_change(volatile const uint64_t *sync, uint64_t wake_sync);
 			: "r"(value));				\
 }
 
-#define CPU_XMM_READ(xmm, result_ptr)					\
+#define CPU_XMM_READ(xmm, result_m128i_p)				\
 {									\
-	asm volatile ("movdqu %%" STRINGIFY(xmm) ", %0": "=m" (*(result_ptr)));	\
+	asm volatile ("movdqu %%" STRINGIFY(xmm) ", %0": "=m" (*(result_m128i_p)));	\
 }
 
-#define CPU_XMM_WRITE(xmm, value)					\
+#define CPU_XMM_WRITE(xmm, input_m128i_p)				\
 {									\
 	asm volatile ("movdqu %0, %%" STRINGIFY(xmm)			\
 			: /* No output */				\
-			: "m"(value));					\
+			: "m"(*(input_m128i_p)));			\
 }
 
 static inline uint64_t sgdt(void)
@@ -749,11 +749,11 @@ static inline void read_xmm_0_2(uint64_t *xmm0_addr, uint64_t *xmm1_addr, uint64
 	CPU_XMM_READ(xmm2, xmm2_addr);
 }
 
-static inline void write_xmm_0_2(uint64_t xmm0_val, uint64_t xmm1_val, uint64_t xmm2_val)
+static inline void write_xmm_0_2(uint64_t *xmm0_addr, uint64_t *xmm1_addr, uint64_t *xmm2_addr)
 {
-	CPU_XMM_WRITE(xmm0, xmm0_val);
-	CPU_XMM_WRITE(xmm1, xmm1_val);
-	CPU_XMM_WRITE(xmm2, xmm2_val);
+	CPU_XMM_WRITE(xmm0, xmm0_addr);
+	CPU_XMM_WRITE(xmm1, xmm1_addr);
+	CPU_XMM_WRITE(xmm2, xmm2_addr);
 }
 
 /*
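
Why the pointer parameters matter: with the old by-value prototype, the "m"
constraint was bound to the 64-bit parameter copy in memory, so the 16-byte
movdqu picked up only 64 valid bits plus whatever bytes happened to follow.
Binding the operand to *ptr at the start of the caller's array gives movdqu
the two consecutive uint64_t elements it actually needs. Below is a minimal
standalone sketch of that pattern, not ACRN code; the helper name
xmm0_copy_demo and the extra "memory" clobber are illustrative assumptions
for this demo only.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the patched CPU_XMM_READ/CPU_XMM_WRITE pattern: the memory
 * operands are bound to *src / *dst, i.e. the first element of a
 * uint64_t[2] pair, and movdqu moves the full 128 bits through xmm0.
 * The "memory" clobber is an extra precaution for this standalone demo
 * so the compiler does not cache dst[1] across the asm statement.
 */
static inline void xmm0_copy_demo(const uint64_t *src, uint64_t *dst)
{
	asm volatile ("movdqu %1, %%xmm0\n\t"	/* load 128 bits from src[0..1] */
		      "movdqu %%xmm0, %0"	/* store 128 bits to dst[0..1] */
		      : "=m" (*dst)
		      : "m" (*src)
		      : "xmm0", "memory");
}

int main(void)
{
	uint64_t src[2] = { 0x1122334455667788ULL, 0x99aabbccddeeff00ULL };
	uint64_t dst[2] = { 0, 0 };

	xmm0_copy_demo(src, dst);
	/* Both 64-bit halves arrive, not just dst[0]. */
	printf("%016" PRIx64 " %016" PRIx64 "\n", dst[1], dst[0]);
	return 0;
}

With the pre-patch interface the same movdqu would have read the parameter's
64-bit slot plus 8 adjacent, indeterminate bytes, which is exactly the
behavior this patch removes.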