hv: operations on vcpu->reg_cached/reg_updated don't need LOCK prefix

At run time, a vCPU never reads or writes the registers of another
vCPU, so the bitmap operations on reg_cached and reg_updated don't
need LOCK-prefixed instructions.

Tracked-On: #6289
Signed-off-by: Zide Chen <zide.chen@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
commit 228b052fdb
parent 2b683f8f5b
Author: Zide Chen
Date:   2021-09-30 21:52:43 -07:00
Committer: wenlingz

4 changed files with 24 additions and 25 deletions
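
For context, the patch swaps every LOCK-prefixed bitmap primitive on these two fields for its plain counterpart. A minimal sketch of the difference on x86-64 follows; these are illustrative reconstructions that mirror the names used in the diff, not necessarily ACRN's exact implementations:

#include <stdbool.h>
#include <stdint.h>

/* Atomic across pCPUs: the LOCK prefix makes the read-modify-write
 * indivisible, at the cost of a locked bus/cache-line transaction. */
static inline void bitmap_set_lock(uint16_t nr, volatile uint64_t *addr)
{
	asm volatile("lock btsq %q1, %0"
		     : "+m" (*addr)
		     : "r" ((uint64_t)nr)
		     : "cc", "memory");
}

/* Plain read-modify-write: cheaper, but only safe when a single pCPU
 * ever touches the word -- true for vcpu->reg_cached/reg_updated,
 * since a vCPU's registers are never accessed from other vCPUs. */
static inline void bitmap_set_nolock(uint16_t nr, volatile uint64_t *addr)
{
	asm volatile("btsq %q1, %0"
		     : "+m" (*addr)
		     : "r" ((uint64_t)nr)
		     : "cc", "memory");
}

/* Non-atomic test-and-set, as used by vcpu_get_rip()/vcpu_get_cr0()
 * below: sets the bit and returns its previous state. */
static inline bool bitmap_test_and_set_nolock(uint16_t nr, volatile uint64_t *addr)
{
	int32_t was_set;

	asm volatile("btsq %q2, %1\n\t"
		     "sbbl %0, %0"
		     : "=r" (was_set), "+m" (*addr)
		     : "r" ((uint64_t)nr)
		     : "cc", "memory");
	return (was_set != 0);
}

The plain forms drop the locked transaction on every cached register access, which is pure overhead when the bitmap is only ever touched from one pCPU.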


@@ -1338,8 +1338,8 @@ static void set_vmcs01_guest_state(struct acrn_vcpu *vcpu)
 	 */
 	exec_vmwrite(VMX_GUEST_CR0, vmcs12->host_cr0);
 	exec_vmwrite(VMX_GUEST_CR4, vmcs12->host_cr4);
-	bitmap_clear_lock(CPU_REG_CR0, &vcpu->reg_cached);
-	bitmap_clear_lock(CPU_REG_CR4, &vcpu->reg_cached);
+	bitmap_clear_nolock(CPU_REG_CR0, &vcpu->reg_cached);
+	bitmap_clear_nolock(CPU_REG_CR4, &vcpu->reg_cached);
 
 	exec_vmwrite(VMX_GUEST_CR3, vmcs12->host_cr3);
 	exec_vmwrite(VMX_GUEST_DR7, DR7_INIT_VALUE);


@@ -164,12 +164,12 @@ static void load_world_ctx(struct acrn_vcpu *vcpu, const struct ext_context *ext
 	uint32_t i;
 
 	/* mark to update on-demand run_context for efer/rflags/rsp/rip/cr0/cr4 */
-	bitmap_set_lock(CPU_REG_EFER, &vcpu->reg_updated);
-	bitmap_set_lock(CPU_REG_RFLAGS, &vcpu->reg_updated);
-	bitmap_set_lock(CPU_REG_RSP, &vcpu->reg_updated);
-	bitmap_set_lock(CPU_REG_RIP, &vcpu->reg_updated);
-	bitmap_set_lock(CPU_REG_CR0, &vcpu->reg_updated);
-	bitmap_set_lock(CPU_REG_CR4, &vcpu->reg_updated);
+	bitmap_set_nolock(CPU_REG_EFER, &vcpu->reg_updated);
+	bitmap_set_nolock(CPU_REG_RFLAGS, &vcpu->reg_updated);
+	bitmap_set_nolock(CPU_REG_RSP, &vcpu->reg_updated);
+	bitmap_set_nolock(CPU_REG_RIP, &vcpu->reg_updated);
+	bitmap_set_nolock(CPU_REG_CR0, &vcpu->reg_updated);
+	bitmap_set_nolock(CPU_REG_CR4, &vcpu->reg_updated);
 
 	/* VMCS Execution field */
 	exec_vmwrite64(VMX_TSC_OFFSET_FULL, ext_ctx->tsc_offset);


@@ -58,7 +58,7 @@ uint64_t vcpu_get_rip(struct acrn_vcpu *vcpu)
 		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
 	if (!bitmap_test(CPU_REG_RIP, &vcpu->reg_updated) &&
-		!bitmap_test_and_set_lock(CPU_REG_RIP, &vcpu->reg_cached)) {
+		!bitmap_test_and_set_nolock(CPU_REG_RIP, &vcpu->reg_cached)) {
 		ctx->rip = exec_vmread(VMX_GUEST_RIP);
 	}
 	return ctx->rip;
@@ -67,7 +67,7 @@ uint64_t vcpu_get_rip(struct acrn_vcpu *vcpu)
 void vcpu_set_rip(struct acrn_vcpu *vcpu, uint64_t val)
 {
 	vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.rip = val;
-	bitmap_set_lock(CPU_REG_RIP, &vcpu->reg_updated);
+	bitmap_set_nolock(CPU_REG_RIP, &vcpu->reg_updated);
 }
 
 uint64_t vcpu_get_rsp(const struct acrn_vcpu *vcpu)
@@ -84,7 +84,7 @@ void vcpu_set_rsp(struct acrn_vcpu *vcpu, uint64_t val)
 		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
 	ctx->cpu_regs.regs.rsp = val;
-	bitmap_set_lock(CPU_REG_RSP, &vcpu->reg_updated);
+	bitmap_set_nolock(CPU_REG_RSP, &vcpu->reg_updated);
 }
 
 uint64_t vcpu_get_efer(struct acrn_vcpu *vcpu)
@@ -109,7 +109,7 @@ void vcpu_set_efer(struct acrn_vcpu *vcpu, uint64_t val)
 	}
 
 	/* Write the new value to VMCS in either case */
-	bitmap_set_lock(CPU_REG_EFER, &vcpu->reg_updated);
+	bitmap_set_nolock(CPU_REG_EFER, &vcpu->reg_updated);
 }
 
 uint64_t vcpu_get_rflags(struct acrn_vcpu *vcpu)
@@ -118,8 +118,7 @@ uint64_t vcpu_get_rflags(struct acrn_vcpu *vcpu)
 		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
 	if (!bitmap_test(CPU_REG_RFLAGS, &vcpu->reg_updated) &&
-		!bitmap_test_and_set_lock(CPU_REG_RFLAGS,
-			&vcpu->reg_cached) && vcpu->launched) {
+		!bitmap_test_and_set_nolock(CPU_REG_RFLAGS, &vcpu->reg_cached) && vcpu->launched) {
 		ctx->rflags = exec_vmread(VMX_GUEST_RFLAGS);
 	}
 	return ctx->rflags;
@@ -129,7 +128,7 @@ void vcpu_set_rflags(struct acrn_vcpu *vcpu, uint64_t val)
 {
 	vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.rflags =
 									val;
-	bitmap_set_lock(CPU_REG_RFLAGS, &vcpu->reg_updated);
+	bitmap_set_nolock(CPU_REG_RFLAGS, &vcpu->reg_updated);
 }
 
 uint64_t vcpu_get_guest_msr(const struct acrn_vcpu *vcpu, uint32_t msr)
@@ -625,16 +624,16 @@ static void write_cached_registers(struct acrn_vcpu *vcpu)
 	struct run_context *ctx =
 		&vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
-	if (bitmap_test_and_clear_lock(CPU_REG_RIP, &vcpu->reg_updated)) {
+	if (bitmap_test_and_clear_nolock(CPU_REG_RIP, &vcpu->reg_updated)) {
 		exec_vmwrite(VMX_GUEST_RIP, ctx->rip);
 	}
 
-	if (bitmap_test_and_clear_lock(CPU_REG_RSP, &vcpu->reg_updated)) {
+	if (bitmap_test_and_clear_nolock(CPU_REG_RSP, &vcpu->reg_updated)) {
 		exec_vmwrite(VMX_GUEST_RSP, ctx->cpu_regs.regs.rsp);
 	}
 
-	if (bitmap_test_and_clear_lock(CPU_REG_EFER, &vcpu->reg_updated)) {
+	if (bitmap_test_and_clear_nolock(CPU_REG_EFER, &vcpu->reg_updated)) {
 		exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, ctx->ia32_efer);
 	}
 
-	if (bitmap_test_and_clear_lock(CPU_REG_RFLAGS, &vcpu->reg_updated)) {
+	if (bitmap_test_and_clear_nolock(CPU_REG_RFLAGS, &vcpu->reg_updated)) {
 		exec_vmwrite(VMX_GUEST_RFLAGS, ctx->rflags);
 	}
@@ -643,11 +642,11 @@ static void write_cached_registers(struct acrn_vcpu *vcpu)
 	 * switching. There should no other module request updating
 	 * CR0/CR4 here.
 	 */
-	if (bitmap_test_and_clear_lock(CPU_REG_CR0, &vcpu->reg_updated)) {
+	if (bitmap_test_and_clear_nolock(CPU_REG_CR0, &vcpu->reg_updated)) {
 		vcpu_set_cr0(vcpu, ctx->cr0);
 	}
 
-	if (bitmap_test_and_clear_lock(CPU_REG_CR4, &vcpu->reg_updated)) {
+	if (bitmap_test_and_clear_nolock(CPU_REG_CR4, &vcpu->reg_updated)) {
 		vcpu_set_cr4(vcpu, ctx->cr4);
 	}
 }


@@ -318,7 +318,7 @@ static void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t value)
 	exec_vmwrite(VMX_CR0_READ_SHADOW, effective_cr0);
 
 	/* clear read cache, next time read should from VMCS */
-	bitmap_clear_lock(CPU_REG_CR0, &vcpu->reg_cached);
+	bitmap_clear_nolock(CPU_REG_CR0, &vcpu->reg_cached);
 
 	pr_dbg("VMM: Try to write %016lx, allow to write 0x%016lx to CR0", effective_cr0, tmp);
 }
@@ -420,7 +420,7 @@ static void vmx_write_cr4(struct acrn_vcpu *vcpu, uint64_t cr4)
 	exec_vmwrite(VMX_CR4_READ_SHADOW, cr4);
 
 	/* clear read cache, next time read should from VMCS */
-	bitmap_clear_lock(CPU_REG_CR4, &vcpu->reg_cached);
+	bitmap_clear_nolock(CPU_REG_CR4, &vcpu->reg_cached);
 
 	pr_dbg("VMM: Try to write %016lx, allow to write 0x%016lx to CR4", cr4, tmp);
 }
@@ -521,7 +521,7 @@ uint64_t vcpu_get_cr0(struct acrn_vcpu *vcpu)
 {
 	struct run_context *ctx = &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
-	if (bitmap_test_and_set_lock(CPU_REG_CR0, &vcpu->reg_cached) == 0) {
+	if (bitmap_test_and_set_nolock(CPU_REG_CR0, &vcpu->reg_cached) == 0) {
 		ctx->cr0 = (exec_vmread(VMX_CR0_READ_SHADOW) & ~cr0_passthru_mask) |
 			(exec_vmread(VMX_GUEST_CR0) & cr0_passthru_mask);
 	}
@@ -549,7 +549,7 @@ uint64_t vcpu_get_cr4(struct acrn_vcpu *vcpu)
 {
 	struct run_context *ctx = &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;
 
-	if (bitmap_test_and_set_lock(CPU_REG_CR4, &vcpu->reg_cached) == 0) {
+	if (bitmap_test_and_set_nolock(CPU_REG_CR4, &vcpu->reg_cached) == 0) {
 		ctx->cr4 = (exec_vmread(VMX_CR4_READ_SHADOW) & ~cr4_passthru_mask) |
 			(exec_vmread(VMX_GUEST_CR4) & cr4_passthru_mask);
 	}