hv: fix violations of coding guideline C-FN-16

The coding guideline rule C-FN-16 requires that 'Mixed-use of
C code and assembly code in a single function shall not be allowed'.
This patch therefore wraps the affected inline assembly in inline functions.

Tracked-On: #6776
Signed-off-by: Mingqiang Chi <mingqiang.chi@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>

v1-->v2:
    use inline functions for reading/writing XMM registers
Author: Mingqiang Chi <mingqiang.chi@intel.com>
Date:   2021-12-22 10:56:03 +08:00 (committed by acrnsi-robot)
Commit: b6b69f2178 (parent: 31a5a907ba)
3 changed files with 32 additions and 18 deletions
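
For context, what C-FN-16 compliance looks like in practice, shown as a
minimal before/after sketch (a hypothetical pause() wrapper, not code
from this patch):

	/* Non-compliant: C statements and inline asm mixed in one function. */
	static void cpu_relax_mixed(void)
	{
		uint32_t polls = 0U;     /* C code ... */
		asm volatile ("pause");  /* ... mixed with assembly */
		polls++;
	}

	/* Compliant: the asm lives alone in a tiny inline wrapper, so every
	 * function body is either pure C or a single asm statement. */
	static inline void asm_pause(void)
	{
		asm volatile ("pause");
	}

	static void cpu_relax(void)
	{
		uint32_t polls = 0U;
		asm_pause();
		polls++;
	}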


@@ -225,21 +225,11 @@ void load_iwkey(struct acrn_vcpu *vcpu)
 	if (pcpu_has_cap(X86_FEATURE_KEYLOCKER) && vcpu->arch.cr4_kl_enabled &&
 	    (get_cpu_var(whose_iwkey) != vcpu)) {
 		/* Save/restore xmm0/xmm1/xmm2 during the process */
-		asm volatile ( "movdqu %%xmm0, %0\n"
-			       "movdqu %%xmm1, %1\n"
-			       "movdqu %%xmm2, %2\n"
-			       "movdqu %3, %%xmm0\n"
-			       "movdqu %4, %%xmm1\n"
-			       "movdqu %5, %%xmm2\n"
-			       : "=m"(xmm_save[0]), "=m"(xmm_save[2]), "=m"(xmm_save[4])
-			       : "m"(vcpu->arch.IWKey.integrity_key[0]),
-				 "m"(vcpu->arch.IWKey.encryption_key[0]),
-				 "m"(vcpu->arch.IWKey.encryption_key[2]));
+		read_xmm_0_2(&xmm_save[0], &xmm_save[2], &xmm_save[4]);
+		write_xmm_0_2(vcpu->arch.IWKey.integrity_key[0], vcpu->arch.IWKey.encryption_key[0],
+			      vcpu->arch.IWKey.encryption_key[2]);
 		asm_loadiwkey(0);
-		asm volatile ( "movdqu %2, %%xmm2\n"
-			       "movdqu %1, %%xmm1\n"
-			       "movdqu %0, %%xmm0\n"
-			       : : "m"(xmm_save[0]), "m"(xmm_save[2]), "m"(xmm_save[4]));
+		write_xmm_0_2(xmm_save[0], xmm_save[2], xmm_save[4]);
 		get_cpu_var(whose_iwkey) = vcpu;
 	}
 }
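
The save/load/restore sequence above is needed because LOADIWKEY takes
its key material from xmm0-2 (integrity key in xmm0, encryption key in
xmm1 and xmm2), so the live register contents must be parked and put
back around the instruction. The new call sequence, annotated (same
calls as in the hunk above; surrounding context assumed):

	read_xmm_0_2(&xmm_save[0], &xmm_save[2], &xmm_save[4]);  /* park live xmm0-2 */
	write_xmm_0_2(vcpu->arch.IWKey.integrity_key[0],         /* stage the IWKey */
		      vcpu->arch.IWKey.encryption_key[0],
		      vcpu->arch.IWKey.encryption_key[2]);
	asm_loadiwkey(0);                                        /* LOADIWKEY consumes xmm0-2 */
	write_xmm_0_2(xmm_save[0], xmm_save[2], xmm_save[4]);    /* restore xmm0-2 */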


@@ -477,10 +477,7 @@ static int32_t loadiwkey_vmexit_handler(struct acrn_vcpu *vcpu)
 	if ((vcpu_get_gpreg(vcpu, CPU_REG_RAX) != 0UL)) {
 		vcpu_inject_gp(vcpu, 0);
 	} else {
-		asm volatile ("movdqu %%xmm0, %0\n"
-			      "movdqu %%xmm1, %1\n"
-			      "movdqu %%xmm2, %2\n"
-			      : : "m"(xmm[0]), "m"(xmm[2]), "m"(xmm[4]));
+		read_xmm_0_2(&xmm[0], &xmm[2], &xmm[4]);
 		vcpu->arch.IWKey.encryption_key[0] = xmm[2];
 		vcpu->arch.IWKey.encryption_key[1] = xmm[3];
 		vcpu->arch.IWKey.encryption_key[2] = xmm[4];


@@ -38,6 +38,7 @@
 #ifndef CPU_H
 #define CPU_H
 #include <types.h>
+#include <util.h>
 #include <acrn_common.h>
 #include <asm/msr.h>
@@ -482,6 +483,18 @@ void wait_sync_change(volatile const uint64_t *sync, uint64_t wake_sync);
 		: "r"(value));						\
 }
+
+#define CPU_XMM_READ(xmm, result_ptr)					\
+{									\
+	asm volatile ("movdqu %%" STRINGIFY(xmm) ", %0": "=m" (*(result_ptr))); \
+}
+
+#define CPU_XMM_WRITE(xmm, value)					\
+{									\
+	asm volatile ("movdqu %0, %%" STRINGIFY(xmm)			\
+		: /* No output */					\
+		: "m"(value));						\
+}

 static inline uint64_t sgdt(void)
 {
 	struct descriptor_table gdtb = {0U, 0UL};
@@ -730,6 +743,20 @@ static inline void asm_loadiwkey(uint32_t eax)
 	asm volatile(".byte 0xf3, 0x0f, 0x38, 0xdc, 0xd1;": : "a" (eax));
 }
+
+static inline void read_xmm_0_2(uint64_t *xmm0_addr, uint64_t *xmm1_addr, uint64_t *xmm2_addr)
+{
+	CPU_XMM_READ(xmm0, xmm0_addr);
+	CPU_XMM_READ(xmm1, xmm1_addr);
+	CPU_XMM_READ(xmm2, xmm2_addr);
+}
+
+static inline void write_xmm_0_2(uint64_t xmm0_val, uint64_t xmm1_val, uint64_t xmm2_val)
+{
+	CPU_XMM_WRITE(xmm0, xmm0_val);
+	CPU_XMM_WRITE(xmm1, xmm1_val);
+	CPU_XMM_WRITE(xmm2, xmm2_val);
+}

 /*
  * stac/clac pair is used to access guest's memory protected by SMAP,
  * following below flow:
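
For reference, how the new macros expand and behave, shown as a
hypothetical stand-alone demo (not part of this commit; it assumes
STRINGIFY(x) stringizes its argument, which is what the newly included
<util.h> provides):

	#include <stdint.h>
	#include <inttypes.h>
	#include <stdio.h>

	#define STRINGIFY(x) #x

	/* Same shape as the hypervisor macros above.
	 * CPU_XMM_READ(xmm1, p) expands to:
	 *     asm volatile ("movdqu %%xmm1, %0" : "=m" (*(p)));
	 */
	#define CPU_XMM_READ(xmm, result_ptr) \
	{ \
		asm volatile ("movdqu %%" STRINGIFY(xmm) ", %0": "=m" (*(result_ptr))); \
	}

	#define CPU_XMM_WRITE(xmm, value) \
	{ \
		asm volatile ("movdqu %0, %%" STRINGIFY(xmm) : : "m"(value)); \
	}

	int main(void)
	{
		uint64_t in[2] = {0x1122334455667788UL, 0x99aabbccddeeff00UL};
		uint64_t out[2] = {0UL, 0UL};

		CPU_XMM_WRITE(xmm1, in[0]);   /* movdqu loads 16 bytes from &in[0] */
		CPU_XMM_READ(xmm1, &out[0]);  /* movdqu stores 16 bytes to &out[0] */

		/* Only out[0] is named in the "=m" constraint, so only the low
		 * 8 bytes are guaranteed visible here; movdqu moved all 16, and
		 * the hypervisor deliberately relies on the adjacent array
		 * element being contiguous, exactly as xmm_save[] is used in
		 * load_iwkey(). */
		printf("low half round-tripped: %016" PRIx64 "\n", out[0]);
		return 0;
	}

The register name has to be baked in at preprocessing time because GCC
inline asm cannot select an XMM register from a runtime value, hence a
macro plus STRINGIFY rather than a function parameter.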