Fix: write xmm registers correctly

The movdqu instruction moves an unaligned double quadword (128 bits) into or out of an XMM register. This patch changes write_xmm_0_2() to take pointers as input parameters, so that the full 128-bit value for each XMM register is read from the underlying 64-bit array instead of a single 64-bit value passed by value.

Tracked-On: #7380
Reviewed-by: Fei Li <fei1.li@intel.com>
Signed-off-by: Jiang, Yanting <yanting.jiang@intel.com>
parent 1540d3479f
commit 599894e571
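Why pointers matter here (a minimal sketch, not the ACRN macro itself; the wrapper name xmm0_write_128 and the helper struct are illustrative): movdqu always transfers 16 bytes, so the inline-asm memory operand has to name a 128-bit object. A uint64_t passed by value only gives the compiler an 8-byte operand, leaving the upper half of the register filled from whatever happens to sit next to it in memory; a pointer to the first of two adjacent uint64_t elements lets the "m" constraint cover the full value.

#include <stdint.h>

/* Illustrative 16-byte view of two adjacent uint64_t array elements. */
struct xmm_pair {
        uint64_t lo;
        uint64_t hi;
};

/* Sketch of a pointer-based writer: movdqu reads the full 16 bytes at p,
 * i.e. p[0] and p[1], into %xmm0.  Like the patched CPU_XMM_WRITE, it has
 * no clobber list because callers save and restore xmm0-xmm2 around it.
 */
static inline void xmm0_write_128(const uint64_t *p)
{
        asm volatile ("movdqu %0, %%xmm0"
                      : /* no output */
                      : "m" (*(const struct xmm_pair *)p));
}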
hypervisor/arch/x86/guest/vcpu.c (6 lines changed, normal file → executable file)
@@ -226,10 +226,10 @@ void load_iwkey(struct acrn_vcpu *vcpu)
     (get_cpu_var(whose_iwkey) != vcpu)) {
         /* Save/restore xmm0/xmm1/xmm2 during the process */
         read_xmm_0_2(&xmm_save[0], &xmm_save[2], &xmm_save[4]);
-        write_xmm_0_2(vcpu->arch.IWKey.integrity_key[0], vcpu->arch.IWKey.encryption_key[0],
-                      vcpu->arch.IWKey.encryption_key[2]);
+        write_xmm_0_2(&vcpu->arch.IWKey.integrity_key[0], &vcpu->arch.IWKey.encryption_key[0],
+                      &vcpu->arch.IWKey.encryption_key[2]);
         asm_loadiwkey(0);
-        write_xmm_0_2(xmm_save[0], xmm_save[2], xmm_save[4]);
+        write_xmm_0_2(&xmm_save[0], &xmm_save[2], &xmm_save[4]);
         get_cpu_var(whose_iwkey) = vcpu;
     }
 }
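For reference, the data shape the corrected call relies on, as implied by the indices above (the declarations below are an illustrative assumption; the real fields live in vcpu->arch.IWKey in the ACRN headers): each &key[i] names the low 64-bit half of one 128-bit chunk, and movdqu picks up key[i] and key[i+1] together.

#include <stdint.h>

/* Illustrative layout only -- not the ACRN struct definition. */
uint64_t integrity_key[2];   /* &integrity_key[0]  -> xmm0 loads [0..1] */
uint64_t encryption_key[4];  /* &encryption_key[0] -> xmm1 loads [0..1],
                              * &encryption_key[2] -> xmm2 loads [2..3] */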
hypervisor/include/arch/x86/asm/cpu.h (16 lines changed, normal file → executable file)
@@ -482,16 +482,16 @@ void wait_sync_change(volatile const uint64_t *sync, uint64_t wake_sync);
                       : "r"(value)); \
 }
 
-#define CPU_XMM_READ(xmm, result_ptr) \
+#define CPU_XMM_READ(xmm, result_m128i_p) \
 { \
-        asm volatile ("movdqu %%" STRINGIFY(xmm) ", %0": "=m" (*(result_ptr))); \
+        asm volatile ("movdqu %%" STRINGIFY(xmm) ", %0": "=m" (*(result_m128i_p))); \
 }
 
-#define CPU_XMM_WRITE(xmm, value) \
+#define CPU_XMM_WRITE(xmm, input_m128i_p) \
 { \
         asm volatile ("movdqu %0, %%" STRINGIFY(xmm) \
                       : /* No output */ \
-                      : "m"(value)); \
+                      : "m"(*(input_m128i_p))); \
 }
 
 static inline uint64_t sgdt(void)
@@ -749,11 +749,11 @@ static inline void read_xmm_0_2(uint64_t *xmm0_addr, uint64_t *xmm1_addr, uint64
         CPU_XMM_READ(xmm2, xmm2_addr);
 }
 
-static inline void write_xmm_0_2(uint64_t xmm0_val, uint64_t xmm1_val, uint64_t xmm2_val)
+static inline void write_xmm_0_2(uint64_t *xmm0_addr, uint64_t *xmm1_addr, uint64_t *xmm2_addr)
 {
-        CPU_XMM_WRITE(xmm0, xmm0_val);
-        CPU_XMM_WRITE(xmm1, xmm1_val);
-        CPU_XMM_WRITE(xmm2, xmm2_val);
+        CPU_XMM_WRITE(xmm0, xmm0_addr);
+        CPU_XMM_WRITE(xmm1, xmm1_addr);
+        CPU_XMM_WRITE(xmm2, xmm2_addr);
 }
 
 /*