hv: vcr: check guest cr3 before loading pdptrs

Check whether the address area pointed to by the guest
CR3 is valid before loading the PDPTEs. Inject #GP(0)
into the guest for any invalid case.

Tracked-On: #3572
Signed-off-by: Jie Deng <jie.deng@intel.com>
Reviewed-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Jie Deng 2019-08-12 11:47:39 +08:00 committed by ACRN System Integration
parent 5b5efe7193
commit 866935a53f

View File

@ -34,23 +34,53 @@
CR4_OSXMMEXCPT | CR4_SMAP | CR4_PKE | \ CR4_OSXMMEXCPT | CR4_SMAP | CR4_PKE | \
CR4_SMXE | CR4_UMIP) CR4_SMXE | CR4_UMIP)
/* PAE PDPTE bits 1 ~ 2, 5 ~ 8 are always reserved.
 * 0x1E6UL == binary 1_1110_0110: bits 1, 2 and 5..8 set.
 * A PDPTE with the Present flag set and any of these bits set is invalid
 * and must cause a #GP(0) injection on the CR load that consumed it.
 */
#define PAE_PDPTE_FIXED_RESVD_BITS 0x00000000000001E6UL
/* CR0/CR4 bit masks computed at init time (see init_cr0_cr4_host_mask):
 * bits a guest must keep set ("always on") or must keep clear ("always off").
 */
static uint64_t cr0_always_on_mask;
static uint64_t cr0_always_off_mask;
static uint64_t cr4_always_on_mask;
static uint64_t cr4_always_off_mask;
static void load_pdptrs(const struct acrn_vcpu *vcpu) static int32_t load_pdptrs(const struct acrn_vcpu *vcpu)
{ {
uint64_t guest_cr3 = exec_vmread(VMX_GUEST_CR3); uint64_t guest_cr3 = exec_vmread(VMX_GUEST_CR3);
/* TODO: check whether guest cr3 is valid */ struct cpuinfo_x86 *cpu_info = get_pcpu_info();
uint64_t *guest_cr3_hva = (uint64_t *)gpa2hva(vcpu->vm, get_pae_pdpt_addr(guest_cr3)); int32_t ret = 0;
uint64_t pdpte[4]; /* Total four PDPTE */
uint64_t rsvd_bits_mask;
uint8_t maxphyaddr;
int32_t i;
stac(); /* check whether the address area pointed by the guest cr3
exec_vmwrite64(VMX_GUEST_PDPTE0_FULL, get_pgentry(guest_cr3_hva + 0UL)); * can be accessed or not
exec_vmwrite64(VMX_GUEST_PDPTE1_FULL, get_pgentry(guest_cr3_hva + 1UL)); */
exec_vmwrite64(VMX_GUEST_PDPTE2_FULL, get_pgentry(guest_cr3_hva + 2UL)); if (copy_from_gpa(vcpu->vm, pdpte, get_pae_pdpt_addr(guest_cr3), sizeof(pdpte)) != 0) {
exec_vmwrite64(VMX_GUEST_PDPTE3_FULL, get_pgentry(guest_cr3_hva + 3UL)); ret = -EFAULT;
clac(); } else {
/* Check if any of the PDPTEs sets both the P flag
* and any reserved bit
*/
maxphyaddr = cpu_info->phys_bits;
/* reserved bits: 1~2, 5~8, maxphyaddr ~ 63 */
rsvd_bits_mask = (63U < maxphyaddr) ? 0UL : (((1UL << (63U - maxphyaddr + 1U)) - 1UL) << maxphyaddr);
rsvd_bits_mask |= PAE_PDPTE_FIXED_RESVD_BITS;
for (i = 0; i < 4; i++) {
if (((pdpte[i] & PAGE_PRESENT) != 0UL) && ((pdpte[i] & rsvd_bits_mask) != 0UL)) {
ret = -EFAULT;
break;
}
}
}
if (ret == 0) {
exec_vmwrite64(VMX_GUEST_PDPTE0_FULL, pdpte[0]);
exec_vmwrite64(VMX_GUEST_PDPTE1_FULL, pdpte[1]);
exec_vmwrite64(VMX_GUEST_PDPTE2_FULL, pdpte[2]);
exec_vmwrite64(VMX_GUEST_PDPTE3_FULL, pdpte[3]);
}
return ret;
} }
static bool is_cr0_write_valid(struct acrn_vcpu *vcpu, uint64_t cr0) static bool is_cr0_write_valid(struct acrn_vcpu *vcpu, uint64_t cr0)
@ -113,6 +143,8 @@ static bool is_cr0_write_valid(struct acrn_vcpu *vcpu, uint64_t cr0)
*/ */
static void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t cr0) static void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t cr0)
{ {
bool err_found = false;
if (!is_cr0_write_valid(vcpu, cr0)) { if (!is_cr0_write_valid(vcpu, cr0)) {
pr_dbg("Invalid cr0 write operation from guest"); pr_dbg("Invalid cr0 write operation from guest");
vcpu_inject_gp(vcpu, 0U); vcpu_inject_gp(vcpu, 0U);
@ -140,7 +172,10 @@ static void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t cr0)
vcpu_set_efer(vcpu, vcpu_get_efer(vcpu) | MSR_IA32_EFER_LMA_BIT); vcpu_set_efer(vcpu, vcpu_get_efer(vcpu) | MSR_IA32_EFER_LMA_BIT);
} else if (is_pae(vcpu)) { } else if (is_pae(vcpu)) {
/* enabled PAE from paging disabled */ /* enabled PAE from paging disabled */
load_pdptrs(vcpu); if (load_pdptrs(vcpu) != 0) {
err_found = true;
vcpu_inject_gp(vcpu, 0U);
}
} else { } else {
/* do nothing */ /* do nothing */
} }
@ -158,6 +193,7 @@ static void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t cr0)
/* do nothing */ /* do nothing */
} }
if (err_found == false) {
/* If CR0.CD or CR0.NW get cr0_changed_bits */ /* If CR0.CD or CR0.NW get cr0_changed_bits */
if ((cr0_changed_bits & (CR0_CD | CR0_NW)) != 0UL) { if ((cr0_changed_bits & (CR0_CD | CR0_NW)) != 0UL) {
/* No action if only CR0.NW is cr0_changed_bits */ /* No action if only CR0.NW is cr0_changed_bits */
@ -201,6 +237,7 @@ static void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t cr0)
pr_dbg("VMM: Try to write %016llx, allow to write 0x%016llx to CR0", cr0_mask, cr0_vmx); pr_dbg("VMM: Try to write %016llx, allow to write 0x%016llx to CR0", cr0_mask, cr0_vmx);
} }
}
} }
static bool is_cr4_write_valid(struct acrn_vcpu *vcpu, uint64_t cr4) static bool is_cr4_write_valid(struct acrn_vcpu *vcpu, uint64_t cr4)
@ -268,6 +305,8 @@ static bool is_cr4_write_valid(struct acrn_vcpu *vcpu, uint64_t cr4)
*/ */
static void vmx_write_cr4(struct acrn_vcpu *vcpu, uint64_t cr4) static void vmx_write_cr4(struct acrn_vcpu *vcpu, uint64_t cr4)
{ {
bool err_found = false;
if (!is_cr4_write_valid(vcpu, cr4)) { if (!is_cr4_write_valid(vcpu, cr4)) {
pr_dbg("Invalid cr4 write operation from guest"); pr_dbg("Invalid cr4 write operation from guest");
vcpu_inject_gp(vcpu, 0U); vcpu_inject_gp(vcpu, 0U);
@ -277,12 +316,17 @@ static void vmx_write_cr4(struct acrn_vcpu *vcpu, uint64_t cr4)
if (((cr4 ^ old_cr4) & (CR4_PGE | CR4_PSE | CR4_PAE | CR4_SMEP | CR4_SMAP | CR4_PKE)) != 0UL) { if (((cr4 ^ old_cr4) & (CR4_PGE | CR4_PSE | CR4_PAE | CR4_SMEP | CR4_SMAP | CR4_PKE)) != 0UL) {
if (((cr4 & CR4_PAE) != 0UL) && (is_paging_enabled(vcpu)) && (!is_long_mode(vcpu))) { if (((cr4 & CR4_PAE) != 0UL) && (is_paging_enabled(vcpu)) && (!is_long_mode(vcpu))) {
load_pdptrs(vcpu); if (load_pdptrs(vcpu) != 0) {
err_found = true;
vcpu_inject_gp(vcpu, 0U);
} }
}
if (err_found == false) {
vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH); vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
} }
}
if (err_found == false) {
/* Clear forced off bits */ /* Clear forced off bits */
cr4_shadow = cr4 & ~CR4_MCE; cr4_shadow = cr4 & ~CR4_MCE;
@ -295,6 +339,7 @@ static void vmx_write_cr4(struct acrn_vcpu *vcpu, uint64_t cr4)
pr_dbg("VMM: Try to write %016llx, allow to write 0x%016llx to CR4", cr4, cr4_vmx); pr_dbg("VMM: Try to write %016llx, allow to write 0x%016llx to CR4", cr4, cr4_vmx);
} }
}
} }
void init_cr0_cr4_host_mask(void) void init_cr0_cr4_host_mask(void)