HV: re-use split-lock emulation code for uc-lock

Split-lock emulation can be re-used for uc-lock. Today, emulate_splitlock()
only works when the VM exit is for an #AC trap, the guest does not handle
split-lock itself, and the HV has enabled #AC for split-lock.
Add another condition so that emulate_splitlock() also works for a #GP trap
when the guest does not handle uc-lock itself and the HV has enabled #GP
for uc-lock.
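
For illustration only, a minimal standalone sketch of the gating decision
described above. The static flags and should_emulate_lock_exception() below
are hypothetical stand-ins, not code from this commit; they only mirror the
is_*_enabled()/is_guest_*_enabled() checks and the #AC/#GP vector cases that
emulate_splitlock() handles.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IDT_GP	13U	/* #GP: general protection fault vector */
#define IDT_AC	17U	/* #AC: alignment check vector */

/* Hypothetical stand-ins for the HV- and guest-side checks used by emulate_splitlock(). */
static bool hv_ac_for_splitlock = true;		/* HV enabled #AC for split-lock */
static bool guest_handles_splitlock = false;	/* guest enabled #AC itself */
static bool hv_gp_for_uclock = true;		/* HV enabled #GP for uc-lock */
static bool guest_handles_uclock = false;	/* guest enabled #GP itself */

/*
 * Emulate the locked access only when the HV owns the detection (enabled in
 * the platform but not in the guest), for either the split-lock (#AC) or the
 * uc-lock (#GP) flavor; otherwise the exception is injected back to the guest.
 */
static bool should_emulate_lock_exception(uint32_t vector)
{
	bool hv_owns = (hv_ac_for_splitlock && !guest_handles_splitlock) ||
		       (hv_gp_for_uclock && !guest_handles_uclock);

	return hv_owns && ((vector == IDT_AC) || (vector == IDT_GP));
}

int main(void)
{
	printf("#AC exit: %s\n", should_emulate_lock_exception(IDT_AC) ? "emulate" : "inject back");
	printf("#GP exit: %s\n", should_emulate_lock_exception(IDT_GP) ? "emulate" : "inject back");
	return 0;
}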

Tracked-On: #6299
Signed-off-by: Tao Yuhong <yuhong.tao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Tao Yuhong authored on 2021-07-12 09:15:23 -04:00; committed by wenlingz
parent 553d59644b
commit bbd7b7091b
4 changed files with 46 additions and 12 deletions


@@ -153,13 +153,25 @@ bool is_ac_enabled(void)
 {
 	bool ac_enabled = false;
 
-	if (has_core_cap(1U << 5U) && (msr_read(MSR_TEST_CTL) & (1U << 29U))) {
+	if (has_core_cap(CORE_CAP_SPLIT_LOCK) && (msr_read(MSR_TEST_CTL) & MSR_TEST_CTL_AC_SPLITLOCK)) {
 		ac_enabled = true;
 	}
 
 	return ac_enabled;
 }
 
+bool is_gp_enabled(void)
+{
+	bool gp_enabled = false;
+
+	if (has_core_cap(CORE_CAP_UC_LOCK) && (msr_read(MSR_TEST_CTL) & MSR_TEST_CTL_GP_UCLOCK)) {
+		gp_enabled = true;
+	}
+
+	return gp_enabled;
+}
+
 static void detect_ept_cap(void)
 {
 	uint64_t msr_val;


@@ -18,7 +18,18 @@ static bool is_guest_ac_enabled(struct acrn_vcpu *vcpu)
 {
 	bool ret = false;
 
-	if ((vcpu_get_guest_msr(vcpu, MSR_TEST_CTL) & (1UL << 29UL)) != 0UL) {
+	if ((vcpu_get_guest_msr(vcpu, MSR_TEST_CTL) & MSR_TEST_CTL_AC_SPLITLOCK) != 0UL) {
+		ret = true;
+	}
+
+	return ret;
+}
+
+static bool is_guest_gp_enabled(struct acrn_vcpu *vcpu)
+{
+	bool ret = false;
+
+	if ((vcpu_get_guest_msr(vcpu, MSR_TEST_CTL) & MSR_TEST_CTL_GP_UCLOCK) != 0UL) {
 		ret = true;
 	}
 
@@ -68,14 +79,15 @@ int32_t emulate_splitlock(struct acrn_vcpu *vcpu, uint32_t exception_vector, bool *queue_exception)
 	*queue_exception = true;
 
 	/*
-	 * The split-lock detection is enabled by default if the platform supports it.
+	 * The split-lock/uc-lock detection is enabled by default if the platform supports it.
 	 * Here, we check if the split-lock detection is really enabled or not. If the
-	 * split-lock detection is enabled in the platform but not enabled in the guest
+	 * split-lock/uc-lock detection is enabled in the platform but not enabled in the guest
 	 * then we try to emulate it, otherwise, inject the exception back.
 	 */
-	if (is_ac_enabled() && !is_guest_ac_enabled(vcpu)) {
+	if ((is_ac_enabled() && !is_guest_ac_enabled(vcpu)) || (is_gp_enabled() && !is_guest_gp_enabled(vcpu))){
 		switch (exception_vector) {
 		case IDT_AC:
+		case IDT_GP:
 			status = copy_from_gva(vcpu, inst, vcpu_get_rip(vcpu), 1U, &err_code, &fault_addr);
 			if (status < 0) {
 				pr_err("Error copy instruction from Guest!");
@@ -87,13 +99,13 @@ int32_t emulate_splitlock(struct acrn_vcpu *vcpu, uint32_t exception_vector, bool *queue_exception)
 			}
 		} else {
 			/*
-			 * If AC is caused by instruction with LOCK prefix or xchg, then emulate it,
+			 * If #AC/#GP is caused by instruction with LOCK prefix or xchg, then emulate it,
 			 * otherwise, inject it back.
 			 */
 			if (inst[0] == 0xf0U) { /* This is LOCK prefix */
 				/*
 				 * Kick other vcpus of the guest to stop execution
-				 * until the split-lock emulation being completed.
+				 * until the split-lock/uc-lock emulation being completed.
 				 */
 				vcpu_kick_splitlock_emulation(vcpu);
 
@@ -108,7 +120,7 @@ int32_t emulate_splitlock(struct acrn_vcpu *vcpu, uint32_t exception_vector, bool *queue_exception)
 				vcpu->arch.emulating_lock = true;
 			}
 
-			/* Skip the #AC, we have emulated it. */
+			/* Skip the #AC/#GP, we have emulated it. */
 			*queue_exception = false;
 		} else {
 			status = decode_instruction(vcpu, false);
@@ -120,13 +132,13 @@ int32_t emulate_splitlock(struct acrn_vcpu *vcpu, uint32_t exception_vector, bool *queue_exception)
 			if (is_current_opcode_xchg(vcpu)) {
 				/*
 				 * Kick other vcpus of the guest to stop execution
-				 * until the split-lock emulation being completed.
+				 * until the split-lock/uc-lock emulation being completed.
 				 */
 				vcpu_kick_splitlock_emulation(vcpu);
 
 				/*
 				 * Using emulating_lock to make sure xchg emulation
-				 * is only called by split-lock emulation.
+				 * is only called by split-lock/uc-lock emulation.
 				 */
 				vcpu->arch.emulating_lock = true;
 				status = emulate_instruction(vcpu);
@@ -143,13 +155,13 @@ int32_t emulate_splitlock(struct acrn_vcpu *vcpu, uint32_t exception_vector, bool *queue_exception)
 				 */
 				vcpu_complete_splitlock_emulation(vcpu);
 
-				/* Do not inject #AC, we have emulated it */
+				/* Do not inject #AC/#GP, we have emulated it */
 				*queue_exception = false;
 			}
 		} else {
 			if (status == -EFAULT) {
 				pr_info("page fault happen during decode_instruction");
-				/* For this case, Inject #PF, not to queue #AC */
+				/* For this case, Inject #PF, not to queue #AC/#GP */
 				*queue_exception = false;
 			}


@@ -54,9 +54,15 @@ bool pcpu_has_vmx_ept_vpid_cap(uint64_t bit_mask);
 bool is_apl_platform(void);
 bool has_core_cap(uint32_t bit_mask);
 bool is_ac_enabled(void);
+bool is_gp_enabled(void);
 void init_pcpu_capabilities(void);
 void init_pcpu_model_name(void);
 int32_t detect_hardware_support(void);
 struct cpuinfo_x86 *get_pcpu_info(void);
 
+/* The bits of MSR IA32_CORE_CAPABILITIES */
+#define CORE_CAP_SPLIT_LOCK	(1U << 5U)	/* support #AC for Split-locked Access */
+#define CORE_CAP_UC_LOCK	(1U << 4U)	/* support #GP for non-guaranteed-atomic-locked access at Non-WB memory */
+
 #endif /* CPUINFO_H */


@@ -595,6 +595,10 @@
 /* 5 high-order bits in every field are reserved */
 #define PAT_FIELD_RSV_BITS			(0xF8UL)
 
+/* MSR_TEST_CTL bits */
+#define MSR_TEST_CTL_GP_UCLOCK			(1U << 28U)
+#define MSR_TEST_CTL_AC_SPLITLOCK		(1U << 29U)
+#define MSR_TEST_CTL_DISABLE_LOCK_ASSERTION	(1U << 31U)
+
 #ifndef ASSEMBLER
 static inline bool is_pat_mem_type_invalid(uint64_t x)