hv: cleanup IA32_PAT emulation code w.r.t. the refactored guest_msrs[]

Currently there are two fields in ext_context to emulate IA32_PAT MSR:
- ia32_pat: hold the value of the emulated IA32_PAT MSR
- vmx_ia32_pat: used for load/store IA32_PAT MSR during world switch

This patch moves ext_context->ia32_pat to the common placeholder for
emulated MSRs, acrn_vcpu_arch->guest_msrs[].

Also it renames ext_context->vmx_ia32_pat to ext_context->ia32_pat to
retain the same naming convention in struct ext_context.

Tracked-On: #1867
Signed-off-by: Zide Chen <zide.chen@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Zide Chen 2018-10-03 08:43:42 -07:00 committed by wenlingz
parent b6aaf1b8d9
commit 9761eede2a
4 changed files with 12 additions and 30 deletions

View File

@ -146,18 +146,6 @@ inline void vcpu_set_cr4(struct acrn_vcpu *vcpu, uint64_t val)
vmx_write_cr4(vcpu, val);
}
inline uint64_t vcpu_get_pat_ext(const struct acrn_vcpu *vcpu)
{
return vcpu->arch.contexts[vcpu->arch.cur_context].
ext_ctx.ia32_pat;
}
inline void vcpu_set_pat_ext(struct acrn_vcpu *vcpu, uint64_t val)
{
vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx.ia32_pat
= val;
}
uint64_t vcpu_get_guest_msr(const struct acrn_vcpu *vcpu, uint32_t msr)
{
uint32_t index = vmsr_get_guest_msr_index(msr);

View File

@ -177,13 +177,13 @@ static void save_world_ctx(struct acrn_vcpu *vcpu, struct ext_context *ext_ctx)
/*
* Similar to CR0 and CR4, the actual value of guest's IA32_PAT MSR
* (represented by ext_ctx->vmx_ia32_pat) could be different from the
* value that guest reads (represented by ext_ctx->ia32_pat).
* (represented by ext_ctx->ia32_pat) could be different from the
* value that guest reads (guest_msrs[IA32_PAT]).
*
* the wrmsr handler keeps track of 'ia32_pat', and we only
* need to load 'vmx_ia32_pat' here.
* the wrmsr handler keeps track of 'guest_msrs', and we only
* need to save/load 'ext_ctx->ia32_pat' in world switch.
*/
ext_ctx->vmx_ia32_pat = exec_vmread64(VMX_GUEST_IA32_PAT_FULL);
ext_ctx->ia32_pat = exec_vmread64(VMX_GUEST_IA32_PAT_FULL);
ext_ctx->ia32_sysenter_esp = exec_vmread(VMX_GUEST_IA32_SYSENTER_ESP);
ext_ctx->ia32_sysenter_eip = exec_vmread(VMX_GUEST_IA32_SYSENTER_EIP);
ext_ctx->ia32_sysenter_cs = exec_vmread32(VMX_GUEST_IA32_SYSENTER_CS);
@ -237,7 +237,7 @@ static void load_world_ctx(struct acrn_vcpu *vcpu, const struct ext_context *ext
exec_vmwrite(VMX_GUEST_CR3, ext_ctx->cr3);
exec_vmwrite(VMX_GUEST_DR7, ext_ctx->dr7);
exec_vmwrite64(VMX_GUEST_IA32_DEBUGCTL_FULL, ext_ctx->ia32_debugctl);
exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, ext_ctx->vmx_ia32_pat);
exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, ext_ctx->ia32_pat);
exec_vmwrite32(VMX_GUEST_IA32_SYSENTER_CS, ext_ctx->ia32_sysenter_cs);
exec_vmwrite(VMX_GUEST_IA32_SYSENTER_ESP, ext_ctx->ia32_sysenter_esp);
exec_vmwrite(VMX_GUEST_IA32_SYSENTER_EIP, ext_ctx->ia32_sysenter_eip);
@ -426,8 +426,6 @@ static bool init_secure_world_env(struct acrn_vcpu *vcpu,
TRUSTY_EPT_REBASE_GPA + size;
vcpu->arch.contexts[SECURE_WORLD].ext_ctx.tsc_offset = 0UL;
vcpu->arch.contexts[SECURE_WORLD].ext_ctx.ia32_pat =
vcpu->arch.contexts[NORMAL_WORLD].ext_ctx.ia32_pat;
/* Init per world MSRs */
for (i = 0U; i < NUM_WORLD_MSRS; i++) {

View File

@ -260,11 +260,11 @@ static void init_cr0_cr4_host_mask(void)
uint64_t vmx_rdmsr_pat(const struct acrn_vcpu *vcpu)
{
/*
* note: if context->cr0.CD is set, the actual value in guest's
* note: if run_ctx->cr0.CD is set, the actual value in guest's
* IA32_PAT MSR is PAT_ALL_UC_VALUE, which may be different from
* the saved value saved_context->ia32_pat
* the saved value guest_msrs[MSR_IA32_PAT]
*/
return vcpu_get_pat_ext(vcpu);
return vcpu_get_guest_msr(vcpu, MSR_IA32_PAT);
}
int vmx_wrmsr_pat(struct acrn_vcpu *vcpu, uint64_t value)
@ -281,7 +281,7 @@ int vmx_wrmsr_pat(struct acrn_vcpu *vcpu, uint64_t value)
}
}
vcpu_set_pat_ext(vcpu, value);
vcpu_set_guest_msr(vcpu, MSR_IA32_PAT, value);
/*
* If context->cr0.CD is set, we defer any further requests to write
@ -430,7 +430,7 @@ void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t cr0)
} else {
/* Restore IA32_PAT to enable cache again */
exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL,
vcpu_get_pat_ext(vcpu));
vcpu_get_guest_msr(vcpu, MSR_IA32_PAT));
}
vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
}
@ -592,7 +592,7 @@ static void init_guest_vmx(struct acrn_vcpu *vcpu, uint64_t cr0, uint64_t cr3,
exec_vmwrite32(VMX_GUEST_INTERRUPTIBILITY_INFO, 0U);
exec_vmwrite32(VMX_GUEST_ACTIVITY_STATE, 0U);
exec_vmwrite32(VMX_GUEST_SMBASE, 0U);
vcpu_set_pat_ext(vcpu, PAT_POWER_ON_VALUE);
vcpu_set_guest_msr(vcpu, MSR_IA32_PAT, PAT_POWER_ON_VALUE);
exec_vmwrite(VMX_GUEST_IA32_PAT_FULL, PAT_POWER_ON_VALUE);
exec_vmwrite(VMX_GUEST_DR7, DR7_INIT_VALUE);
}

View File

@ -141,7 +141,6 @@ struct ext_context {
uint64_t ia32_kernel_gs_base;
uint64_t ia32_pat;
uint64_t vmx_ia32_pat;
uint32_t ia32_sysenter_cs;
uint64_t ia32_sysenter_esp;
uint64_t ia32_sysenter_eip;
@ -470,9 +469,6 @@ uint64_t vcpu_get_cr4(struct acrn_vcpu *vcpu);
*/
void vcpu_set_cr4(struct acrn_vcpu *vcpu, uint64_t val);
uint64_t vcpu_get_pat_ext(const struct acrn_vcpu *vcpu);
void vcpu_set_pat_ext(struct acrn_vcpu *vcpu, uint64_t val);
/**
* @brief get guest emulated MSR
*