mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-06-19 12:12:16 +00:00
hv: move split-lock logic into dedicated file
This patch moves the split-lock logic into a dedicated file to reduce LOC and make the logic clearer. Tracked-On: #5605 Signed-off-by: Jie Deng <jie.deng@intel.com>
This commit is contained in:
parent
27d5711b62
commit
8aebf5526f
@ -318,6 +318,7 @@ VP_DM_C_SRCS += arch/x86/guest/pm.c
|
||||
VP_DM_C_SRCS += arch/x86/guest/assign.c
|
||||
VP_DM_C_SRCS += arch/x86/guest/vmx_io.c
|
||||
VP_DM_C_SRCS += arch/x86/guest/instr_emul.c
|
||||
VP_DM_C_SRCS += arch/x86/guest/splitlock.c
|
||||
VP_DM_C_SRCS += arch/x86/guest/vm_reset.c
|
||||
VP_DM_C_SRCS += common/ptdev.c
|
||||
|
||||
|
180
hypervisor/arch/x86/guest/splitlock.c
Normal file
180
hypervisor/arch/x86/guest/splitlock.c
Normal file
@ -0,0 +1,180 @@
|
||||
/*
|
||||
* Copyright (C) 2021 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#include <types.h>
|
||||
#include <vcpu.h>
|
||||
#include <vm.h>
|
||||
#include <irq.h>
|
||||
#include <event.h>
|
||||
#include <cpu_caps.h>
|
||||
#include <logmsg.h>
|
||||
#include <errno.h>
|
||||
#include <splitlock.h>
|
||||
|
||||
static bool is_guest_ac_enabled(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
bool ret = false;
|
||||
|
||||
if ((vcpu_get_guest_msr(vcpu, MSR_TEST_CTL) & (1UL << 29UL)) != 0UL) {
|
||||
ret = true;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void vcpu_kick_splitlock_emulation(struct acrn_vcpu *cur_vcpu)
|
||||
{
|
||||
struct acrn_vcpu *other;
|
||||
uint16_t i;
|
||||
|
||||
if (cur_vcpu->vm->hw.created_vcpus > 1U) {
|
||||
get_vm_lock(cur_vcpu->vm);
|
||||
|
||||
foreach_vcpu(i, cur_vcpu->vm, other) {
|
||||
if (other != cur_vcpu) {
|
||||
vcpu_make_request(other, ACRN_REQUEST_SPLIT_LOCK);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void vcpu_complete_splitlock_emulation(struct acrn_vcpu *cur_vcpu)
|
||||
{
|
||||
struct acrn_vcpu *other;
|
||||
uint16_t i;
|
||||
|
||||
if (cur_vcpu->vm->hw.created_vcpus > 1U) {
|
||||
foreach_vcpu(i, cur_vcpu->vm, other) {
|
||||
if (other != cur_vcpu) {
|
||||
/*
|
||||
* Suppose the current vcpu is 0, the other vcpus (1, 2, 3) may wait on the
|
||||
* "get_vm_lock", the current vcpu need clear the ACRN_REQUEST_SPLIT_LOCK
|
||||
* explicitly here after finishing the emulation. Otherwise, it make cause
|
||||
* dead lock. for example:
|
||||
* 1. Once vcpu 0 "put_vm_lock", let's say vcpu 1 will "get_vm_lock".
|
||||
* 2. vcpu 1 call "vcpu_make_request" to pause vcpu 0, 2, 3.
|
||||
* 3. vcpu 1's VCPU_EVENT_SPLIT_LOCK is still not cleared because
|
||||
* the vcpu 0 called "vcpu_make_request" ever.
|
||||
* 4. All vcpus will wait for VCPU_EVENT_SPLIT_LOCK in acrn_handle_pending_request.
|
||||
* We should avoid this dead lock case.
|
||||
*/
|
||||
bitmap_clear_lock(ACRN_REQUEST_SPLIT_LOCK, &other->arch.pending_req);
|
||||
signal_event(&other->events[VCPU_EVENT_SPLIT_LOCK]);
|
||||
}
|
||||
}
|
||||
|
||||
put_vm_lock(cur_vcpu->vm);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Handle a #AC caused by an instruction carrying the LOCK prefix (0xf0):
 * skip the one-byte prefix and re-execute the instruction while all other
 * vCPUs of the guest are paused. On multi-vCPU guests, MTF single-stepping
 * is armed so the MTF VM-exit handler can complete the emulation afterwards.
 */
static void emulate_lock_prefix_instr(struct acrn_vcpu *vcpu, bool *queue_exception)
{
	/*
	 * Kick other vcpus of the guest to stop execution
	 * until the split-lock emulation is completed.
	 */
	vcpu_kick_splitlock_emulation(vcpu);

	/* Skip the LOCK prefix and re-execute the instruction. */
	vcpu->arch.inst_len = 1U;
	if (vcpu->vm->hw.created_vcpus > 1U) {
		/* Enable MTF to start single-stepping execution */
		vcpu->arch.proc_vm_exec_ctrls |= VMX_PROCBASED_CTLS_MON_TRAP;
		exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, vcpu->arch.proc_vm_exec_ctrls);
		/* Cleared (and completion signalled) from the MTF VM-exit handler. */
		vcpu->arch.emulating_lock = true;
	}

	/* Skip the #AC, we have emulated it. */
	*queue_exception = false;
}

/*
 * Handle a #AC caused by an xchg instruction: emulate it in the hypervisor
 * while all other vCPUs of the guest are paused, then resume them.
 *
 * @return 0 on success or on a handled #PF during emulation; a negative
 *         error code from emulate_instruction() otherwise.
 */
static int32_t emulate_xchg_instr(struct acrn_vcpu *vcpu, bool *queue_exception)
{
	int32_t status;

	/*
	 * Kick other vcpus of the guest to stop execution
	 * until the split-lock emulation is completed.
	 */
	vcpu_kick_splitlock_emulation(vcpu);

	/*
	 * Using emulating_lock to make sure xchg emulation
	 * is only called by split-lock emulation.
	 */
	vcpu->arch.emulating_lock = true;
	status = emulate_instruction(vcpu);
	vcpu->arch.emulating_lock = false;
	if (status == -EFAULT) {
		pr_info("page fault happen during emulate_instruction");
		status = 0;
	}

	/* Notify other vcpus of the guest to restart execution. */
	vcpu_complete_splitlock_emulation(vcpu);

	/* Do not inject #AC, we have emulated it */
	*queue_exception = false;

	return status;
}

/*
 * Try to emulate a guest #AC that was raised by platform split-lock detection.
 *
 * The split-lock detection is enabled by default if the platform supports it.
 * Here, we check if the split-lock detection is really enabled or not. If the
 * split-lock detection is enabled in the platform but not enabled in the guest
 * then we try to emulate it, otherwise, inject the exception back.
 *
 * Only LOCK-prefixed instructions and xchg are emulated; anything else (or a
 * non-#AC vector) leaves *queue_exception true so the caller re-injects it.
 *
 * @param vcpu              the faulting vCPU
 * @param exception_vector  vector of the pending exception (only IDT_AC handled)
 * @param queue_exception   out: whether the caller should still queue the
 *                          original exception to the guest
 * @return 0 on success/handled fault, negative error code on emulation failure
 */
int32_t emulate_splitlock(struct acrn_vcpu *vcpu, uint32_t exception_vector, bool *queue_exception)
{
	int32_t status = 0;
	uint8_t inst[1];
	uint32_t err_code = 0U;
	uint64_t fault_addr;

	/* Queue the exception by default if the exception cannot be handled. */
	*queue_exception = true;

	if (is_ac_enabled() && !is_guest_ac_enabled(vcpu)) {
		switch (exception_vector) {
		case IDT_AC:
			/* Fetch the first opcode byte at the guest RIP. */
			status = copy_from_gva(vcpu, inst, vcpu_get_rip(vcpu), 1U, &err_code, &fault_addr);
			if (status < 0) {
				pr_err("Error copy instruction from Guest!");
				if (status == -EFAULT) {
					vcpu_inject_pf(vcpu, fault_addr, err_code);
					status = 0;
					/* For this case, inject #PF, not to queue #AC */
					*queue_exception = false;
				}
			} else if (inst[0] == 0xf0U) {
				/*
				 * #AC caused by an instruction with LOCK prefix:
				 * emulate by skipping the prefix.
				 */
				emulate_lock_prefix_instr(vcpu, queue_exception);
			} else {
				status = decode_instruction(vcpu);
				if (status >= 0) {
					/*
					 * If this is an xchg, emulate it; otherwise
					 * leave *queue_exception true to inject it back.
					 */
					if (is_current_opcode_xchg(vcpu)) {
						status = emulate_xchg_instr(vcpu, queue_exception);
					}
				} else if (status == -EFAULT) {
					pr_info("page fault happen during decode_instruction");
					status = 0;
					/* For this case, Inject #PF, not to queue #AC */
					*queue_exception = false;
				} else {
					/* Other decode failure: return it, #AC stays queued. */
				}
			}
			break;
		default:
			break;
		}
	}

	return status;
}
|
@ -14,6 +14,7 @@
|
||||
#include <vcpu.h>
|
||||
#include <vmcs.h>
|
||||
#include <vm.h>
|
||||
#include <splitlock.h>
|
||||
#include <trace.h>
|
||||
#include <logmsg.h>
|
||||
|
||||
@ -487,171 +488,6 @@ static inline void acrn_inject_pending_intr(struct acrn_vcpu *vcpu,
|
||||
}
|
||||
}
|
||||
|
||||
static void vcpu_kick_splitlock_emulation(struct acrn_vcpu *cur_vcpu)
|
||||
{
|
||||
struct acrn_vcpu *other;
|
||||
uint16_t i;
|
||||
|
||||
if (cur_vcpu->vm->hw.created_vcpus > 1U) {
|
||||
get_vm_lock(cur_vcpu->vm);
|
||||
|
||||
foreach_vcpu(i, cur_vcpu->vm, other) {
|
||||
if (other != cur_vcpu) {
|
||||
vcpu_make_request(other, ACRN_REQUEST_SPLIT_LOCK);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void vcpu_complete_splitlock_emulation(struct acrn_vcpu *cur_vcpu)
|
||||
{
|
||||
struct acrn_vcpu *other;
|
||||
uint16_t i;
|
||||
|
||||
if (cur_vcpu->vm->hw.created_vcpus > 1U) {
|
||||
foreach_vcpu(i, cur_vcpu->vm, other) {
|
||||
if (other != cur_vcpu) {
|
||||
/*
|
||||
* Suppose the current vcpu is 0, the other vcpus (1, 2, 3) may wait on the
|
||||
* "get_vm_lock", the current vcpu need clear the ACRN_REQUEST_SPLIT_LOCK
|
||||
* explicitly here after finishing the emulation. Otherwise, it make cause
|
||||
* dead lock. for example:
|
||||
* 1. Once vcpu 0 "put_vm_lock", let's say vcpu 1 will "get_vm_lock".
|
||||
* 2. vcpu 1 call "vcpu_make_request" to pause vcpu 0, 2, 3.
|
||||
* 3. vcpu 1's VCPU_EVENT_SPLIT_LOCK is still not cleared because
|
||||
* the vcpu 0 called "vcpu_make_request" ever.
|
||||
* 4. All vcpus will wait for VCPU_EVENT_SPLIT_LOCK in acrn_handle_pending_request.
|
||||
* We should avoid this dead lock case.
|
||||
*/
|
||||
bitmap_clear_lock(ACRN_REQUEST_SPLIT_LOCK, &other->arch.pending_req);
|
||||
signal_event(&other->events[VCPU_EVENT_SPLIT_LOCK]);
|
||||
}
|
||||
}
|
||||
|
||||
put_vm_lock(cur_vcpu->vm);
|
||||
}
|
||||
}
|
||||
|
||||
static bool is_guest_ac_enabled(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
bool ret = false;
|
||||
|
||||
if ((vcpu_get_guest_msr(vcpu, MSR_TEST_CTL) & (1UL << 29UL)) != 0UL) {
|
||||
ret = true;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int32_t emulate_splitlock(struct acrn_vcpu *vcpu, uint32_t exception_vector, bool *queue_exception)
|
||||
{
|
||||
int32_t status = 0;
|
||||
uint8_t inst[1];
|
||||
uint32_t err_code = 0U;
|
||||
uint64_t fault_addr;
|
||||
|
||||
/* Queue the exception by default if the exception cannot be handled. */
|
||||
*queue_exception = true;
|
||||
|
||||
/*
|
||||
* The split-lock detection is enabled by default if the platform supports it.
|
||||
* Here, we check if the split-lock detection is really enabled or not. If the
|
||||
* split-lock detection is enabled in the platform but not enabled in the guest
|
||||
* then we try to emulate it, otherwise, inject the exception back.
|
||||
*/
|
||||
if (is_ac_enabled() && !is_guest_ac_enabled(vcpu)) {
|
||||
switch (exception_vector) {
|
||||
case IDT_AC:
|
||||
status = copy_from_gva(vcpu, inst, vcpu_get_rip(vcpu), 1U, &err_code, &fault_addr);
|
||||
if (status < 0) {
|
||||
pr_err("Error copy instruction from Guest!");
|
||||
if (status == -EFAULT) {
|
||||
vcpu_inject_pf(vcpu, fault_addr, err_code);
|
||||
status = 0;
|
||||
/* For this case, inject #PF, not to queue #AC */
|
||||
*queue_exception = false;
|
||||
}
|
||||
} else {
|
||||
/*
|
||||
* If AC is caused by instruction with LOCK prefix or xchg, then emulate it,
|
||||
* otherwise, inject it back.
|
||||
*/
|
||||
if (inst[0] == 0xf0U) { /* This is LOCK prefix */
|
||||
/*
|
||||
* Kick other vcpus of the guest to stop execution
|
||||
* until the split-lock emulation being completed.
|
||||
*/
|
||||
vcpu_kick_splitlock_emulation(vcpu);
|
||||
|
||||
/*
|
||||
* Skip the LOCK prefix and re-execute the instruction.
|
||||
*/
|
||||
vcpu->arch.inst_len = 1U;
|
||||
if (vcpu->vm->hw.created_vcpus > 1U) {
|
||||
/* Enable MTF to start single-stepping execution */
|
||||
vcpu->arch.proc_vm_exec_ctrls |= VMX_PROCBASED_CTLS_MON_TRAP;
|
||||
exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, vcpu->arch.proc_vm_exec_ctrls);
|
||||
vcpu->arch.emulating_lock = true;
|
||||
}
|
||||
|
||||
/* Skip the #AC, we have emulated it. */
|
||||
*queue_exception = false;
|
||||
} else {
|
||||
status = decode_instruction(vcpu);
|
||||
if (status >= 0) {
|
||||
/*
|
||||
* If this is the xchg, then emulate it, otherwise,
|
||||
* inject it back.
|
||||
*/
|
||||
if (is_current_opcode_xchg(vcpu)) {
|
||||
/*
|
||||
* Kick other vcpus of the guest to stop execution
|
||||
* until the split-lock emulation being completed.
|
||||
*/
|
||||
vcpu_kick_splitlock_emulation(vcpu);
|
||||
|
||||
/*
|
||||
* Using emulating_lock to make sure xchg emulation
|
||||
* is only called by split-lock emulation.
|
||||
*/
|
||||
vcpu->arch.emulating_lock = true;
|
||||
status = emulate_instruction(vcpu);
|
||||
vcpu->arch.emulating_lock = false;
|
||||
if (status < 0) {
|
||||
if (status == -EFAULT) {
|
||||
pr_info("page fault happen during emulate_instruction");
|
||||
status = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Notify other vcpus of the guest to restart execution.
|
||||
*/
|
||||
vcpu_complete_splitlock_emulation(vcpu);
|
||||
|
||||
/* Do not inject #AC, we have emulated it */
|
||||
*queue_exception = false;
|
||||
}
|
||||
} else {
|
||||
if (status == -EFAULT) {
|
||||
pr_info("page fault happen during decode_instruction");
|
||||
status = 0;
|
||||
/* For this case, Inject #PF, not to queue #AC */
|
||||
*queue_exception = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/*
|
||||
* @pre vcpu != NULL
|
||||
*/
|
||||
|
@ -14,6 +14,7 @@
|
||||
#include <vmexit.h>
|
||||
#include <vm_reset.h>
|
||||
#include <vmx_io.h>
|
||||
#include <splitlock.h>
|
||||
#include <ept.h>
|
||||
#include <vtd.h>
|
||||
#include <vcpuid.h>
|
||||
@ -272,23 +273,6 @@ static int32_t unhandled_vmexit_handler(struct acrn_vcpu *vcpu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vcpu_complete_split_lock_emulation(struct acrn_vcpu *cur_vcpu)
|
||||
{
|
||||
struct acrn_vcpu *other;
|
||||
uint16_t i;
|
||||
|
||||
if (cur_vcpu->vm->hw.created_vcpus > 1U) {
|
||||
foreach_vcpu(i, cur_vcpu->vm, other) {
|
||||
if (other != cur_vcpu) {
|
||||
bitmap_clear_lock(ACRN_REQUEST_SPLIT_LOCK, &other->arch.pending_req);
|
||||
signal_event(&other->events[VCPU_EVENT_SPLIT_LOCK]);
|
||||
}
|
||||
}
|
||||
|
||||
put_vm_lock(cur_vcpu->vm);
|
||||
}
|
||||
}
|
||||
|
||||
/* MTF is currently only used for split-lock emulation */
|
||||
static int32_t mtf_vmexit_handler(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
@ -299,7 +283,7 @@ static int32_t mtf_vmexit_handler(struct acrn_vcpu *vcpu)
|
||||
|
||||
if (vcpu->arch.emulating_lock) {
|
||||
vcpu->arch.emulating_lock = false;
|
||||
vcpu_complete_split_lock_emulation(vcpu);
|
||||
vcpu_complete_splitlock_emulation(vcpu);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
14
hypervisor/include/arch/x86/guest/splitlock.h
Normal file
14
hypervisor/include/arch/x86/guest/splitlock.h
Normal file
@ -0,0 +1,14 @@
|
||||
/*
|
||||
* Copyright (C) 2021 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
|
||||
#ifndef SPLITLOCK_H_
|
||||
#define SPLITLOCK_H_
|
||||
|
||||
void vcpu_kick_splitlock_emulation(struct acrn_vcpu *cur_vcpu);
|
||||
void vcpu_complete_splitlock_emulation(struct acrn_vcpu *cur_vcpu);
|
||||
int32_t emulate_splitlock(struct acrn_vcpu *vcpu, uint32_t exception_vector, bool *queue_exception);
|
||||
|
||||
#endif /* SPLITLOCK_H_ */
|
Loading…
Reference in New Issue
Block a user