mirror of https://github.com/projectacrn/acrn-hypervisor.git
hv: remove xsave dependence
ACRN should be able to run on platforms without the XSAVE capability, so remove the hard XSAVE dependence to support more (hardware or virtual) platforms.

Tracked-On: #6287
Signed-off-by: Fei Li <fei1.li@intel.com>
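For context: the capability check the commit introduces keys off CPUID. XSAVE support is reported in CPUID.01H:ECX[26] (bit 27, OSXSAVE, additionally tells software whether CR4.OSXSAVE is set). Below is a minimal stand-alone sketch of what a pcpu_has_cap(X86_FEATURE_XSAVE)-style probe boils down to, assuming GCC/Clang on x86_64; this is illustrative user-space code, not ACRN's implementation:

#include <cpuid.h>   /* __get_cpuid() helper shipped with GCC/Clang */
#include <stdbool.h>
#include <stdio.h>

#define CPUID_01_ECX_XSAVE (1U << 26)  /* XSAVE/XRSTOR/XSETBV/XGETBV supported */

static bool cpu_supports_xsave(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 1 holds the basic feature flags; returns 0 if the leaf is unsupported. */
	if (__get_cpuid(1U, &eax, &ebx, &ecx, &edx) == 0) {
		return false;
	}
	return (ecx & CPUID_01_ECX_XSAVE) != 0U;
}

int main(void)
{
	printf("XSAVE supported: %s\n", cpu_supports_xsave() ? "yes" : "no");
	return 0;
}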
@@ -314,13 +314,18 @@ static void init_xsave(struct acrn_vcpu *vcpu)
 	struct ext_context *ectx = &(vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx);
 	struct xsave_area *area = &ectx->xs_area;
 
-	ectx->xcr0 = XSAVE_FPU;
-	(void)memset((void *)area, 0U, XSAVE_STATE_AREA_SIZE);
-
-	/* xsaves only support compacted format, so set it in xcomp_bv[63],
-	 * keep the reset area in header area as zero.
+	/* if the HW has this cap, we need to prepare the buffer for potential save/restore.
+	 * Guest may or may not enable XSAVE -- it doesn't matter.
 	 */
-	ectx->xs_area.xsave_hdr.hdr.xcomp_bv |= XSAVE_COMPACTED_FORMAT;
+	if (pcpu_has_cap(X86_FEATURE_XSAVE)) {
+		ectx->xcr0 = XSAVE_FPU;
+		(void)memset((void *)area, 0U, XSAVE_STATE_AREA_SIZE);
+
+		/* xsaves only support compacted format, so set it in xcomp_bv[63],
+		 * keep the reset area in header area as zero.
+		 */
+		ectx->xs_area.xsave_hdr.hdr.xcomp_bv |= XSAVE_COMPACTED_FORMAT;
+	}
 }
 
 void set_vcpu_regs(struct acrn_vcpu *vcpu, struct acrn_regs *vcpu_regs)
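The xcomp_bv write above is the one subtle piece: XSAVES/XRSTORS only operate on the compacted layout, which is selected by bit 63 of XCOMP_BV in the XSAVE header. A simplified, compilable sketch of that header, with field names following SDM Vol.1 ch.13 (ACRN's real definitions live in its own headers):

#include <stdint.h>

#define XSAVE_COMPACTED_FORMAT (1UL << 63)  /* XCOMP_BV[63]: compacted layout */

/* 64-byte XSAVE header that follows the 512-byte legacy region. */
struct xsave_header_sketch {
	uint64_t xstate_bv;   /* which components hold live state */
	uint64_t xcomp_bv;    /* compaction mask; bit 63 selects compacted format */
	uint64_t reserved[6]; /* must stay zero, as the memset above guarantees */
};

/* After zeroing the whole area, exactly one bit must be flipped before
 * the first XRSTORS can legally consume it.
 */
static void mark_compacted(struct xsave_header_sketch *hdr)
{
	hdr->xcomp_bv |= XSAVE_COMPACTED_FORMAT;
}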
@@ -827,19 +832,23 @@ void zombie_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state)
 	}
 }
 
-void save_xsave_area(__unused struct acrn_vcpu *vcpu, struct ext_context *ectx)
+void save_xsave_area(struct acrn_vcpu *vcpu, struct ext_context *ectx)
 {
-	ectx->xcr0 = read_xcr(0);
-	write_xcr(0, ectx->xcr0 | XSAVE_SSE);
-	xsaves(&ectx->xs_area, UINT64_MAX);
+	if (vcpu->arch.xsave_enabled) {
+		ectx->xcr0 = read_xcr(0);
+		write_xcr(0, ectx->xcr0 | XSAVE_SSE);
+		xsaves(&ectx->xs_area, UINT64_MAX);
+	}
 }
 
 void rstore_xsave_area(const struct acrn_vcpu *vcpu, const struct ext_context *ectx)
 {
-	write_xcr(0, ectx->xcr0 | XSAVE_SSE);
-	msr_write(MSR_IA32_XSS, vcpu_get_guest_msr(vcpu, MSR_IA32_XSS));
-	xrstors(&ectx->xs_area, UINT64_MAX);
-	write_xcr(0, ectx->xcr0);
+	if (vcpu->arch.xsave_enabled) {
+		write_xcr(0, ectx->xcr0 | XSAVE_SSE);
+		msr_write(MSR_IA32_XSS, vcpu_get_guest_msr(vcpu, MSR_IA32_XSS));
+		xrstors(&ectx->xs_area, UINT64_MAX);
+		write_xcr(0, ectx->xcr0);
+	}
 }
 
 /* TODO:
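For readers unfamiliar with the primitives being guarded here: xsaves()/xrstors() are thin wrappers over the XSAVES/XRSTORS instructions. A hedged sketch of what such wrappers typically expand to, using GCC inline assembly; this is an illustration rather than ACRN's actual implementation, and it can only execute at ring 0 since XSAVES/XRSTORS fault at CPL > 0:

#include <stdint.h>

/* Hypothetical fixed-size area; ACRN sizes it via XSAVE_STATE_AREA_SIZE. */
struct xsave_area_sketch {
	uint8_t bytes[4096];
} __attribute__((aligned(64)));   /* XSAVES requires 64-byte alignment */

/* EDX:EAX supplies the 64-bit requested-feature bitmap; passing
 * UINT64_MAX (as the diff does) means "everything enabled in
 * XCR0 | IA32_XSS".
 */
static inline void xsaves_sketch(struct xsave_area_sketch *area, uint64_t mask)
{
	asm volatile("xsaves64 %0"
		     : "+m"(*area)
		     : "a"((uint32_t)mask), "d"((uint32_t)(mask >> 32U))
		     : "memory");
}

static inline void xrstors_sketch(struct xsave_area_sketch *area, uint64_t mask)
{
	asm volatile("xrstors64 %0"
		     :
		     : "m"(*area), "a"((uint32_t)mask), "d"((uint32_t)(mask >> 32U))
		     : "memory");
}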
@@ -321,7 +321,7 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
 	 */
 	value32 = check_vmx_ctrl(MSR_IA32_VMX_PROCBASED_CTLS2,
 			VMX_PROCBASED_CTLS2_VAPIC | VMX_PROCBASED_CTLS2_EPT | VMX_PROCBASED_CTLS2_VPID |
-			VMX_PROCBASED_CTLS2_RDTSCP | VMX_PROCBASED_CTLS2_UNRESTRICT |
+			VMX_PROCBASED_CTLS2_RDTSCP | VMX_PROCBASED_CTLS2_UNRESTRICT | VMX_PROCBASED_CTLS2_XSVE_XRSTR |
 			VMX_PROCBASED_CTLS2_PAUSE_LOOP | VMX_PROCBASED_CTLS2_UWAIT_PAUSE);
 
 	/* SDM Vol3, 25.3, setting "enable INVPCID" VM-execution to 1 with "INVLPG exiting" disabled,
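This hunk only adds VMX_PROCBASED_CTLS2_XSVE_XRSTR to the requested control bits; whether the request sticks is decided by the VMX capability MSR. The adjust-by-capability-MSR pattern behind a check_vmx_ctrl()-style helper, per SDM Vol.3 appendix A.3, looks roughly like this sketch (the MSR value is passed in as a parameter because it is only readable with RDMSR in the hypervisor):

#include <stdint.h>

/* cap64 is the raw value of a VMX capability MSR such as
 * MSR_IA32_VMX_PROCBASED_CTLS2: the low 32 bits are the "allowed-0"
 * settings (bits that must be 1), the high 32 bits the "allowed-1"
 * settings (bits that may be 1).
 */
static uint32_t check_vmx_ctrl_sketch(uint64_t cap64, uint32_t requested)
{
	uint32_t must_be_1 = (uint32_t)cap64;          /* allowed-0 settings */
	uint32_t may_be_1  = (uint32_t)(cap64 >> 32U); /* allowed-1 settings */

	/* Drop whatever the CPU cannot grant, then force the mandatory bits.
	 * Afterwards, testing (result & SOME_CTRL) != 0U -- as the next hunk
	 * does for VMX_PROCBASED_CTLS2_XSVE_XRSTR -- reveals whether a
	 * requested feature was actually granted.
	 */
	return (requested & may_be_1) | must_be_1;
}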
@@ -348,9 +348,11 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
 		exec_vmwrite32(VMX_TPR_THRESHOLD, 0U);
 	}
 
-	if (pcpu_has_cap(X86_FEATURE_OSXSAVE)) {
+	if ((value32 & VMX_PROCBASED_CTLS2_XSVE_XRSTR) != 0U) {
 		exec_vmwrite64(VMX_XSS_EXITING_BITMAP_FULL, 0UL);
-		value32 |= VMX_PROCBASED_CTLS2_XSVE_XRSTR;
+		vcpu->arch.xsave_enabled = true;
+	} else {
+		value32 &= ~VMX_PROCBASED_CTLS2_XSVE_XRSTR;
 	}
 
 	value32 |= VMX_PROCBASED_CTLS2_WBINVD;
@@ -392,47 +392,46 @@ int32_t cpuid_vmexit_handler(struct acrn_vcpu *vcpu)
  */
 static int32_t xsetbv_vmexit_handler(struct acrn_vcpu *vcpu)
 {
-	int32_t idx;
+	int32_t idx, ret = -1; /* ret < 0: call vcpu_inject_gp(vcpu, 0U) */
+	uint32_t cpl;
 	uint64_t val64;
-	int32_t ret = 0;
 
-	idx = vcpu->arch.cur_context;
-	if (idx >= NR_WORLD) {
-		ret = -1;
-	} else {
-		/* to access XCR0, 'ecx' should be 0 */
-		if ((vcpu_get_gpreg(vcpu, CPU_REG_RCX) & 0xffffffffUL) != 0UL) {
-			vcpu_inject_gp(vcpu, 0U);
-		} else {
-			val64 = (vcpu_get_gpreg(vcpu, CPU_REG_RAX) & 0xffffffffUL) |
-					(vcpu_get_gpreg(vcpu, CPU_REG_RDX) << 32U);
+	if (vcpu->arch.xsave_enabled && ((vcpu_get_cr4(vcpu) & CR4_OSXSAVE) != 0UL)) {
+		idx = vcpu->arch.cur_context;
+		/* get current privilege level */
+		cpl = exec_vmread32(VMX_GUEST_CS_ATTR);
+		cpl = (cpl >> 5U) & 3U;
 
-			/* bit 0 (x87 state) of XCR0 can't be cleared */
-			if ((val64 & 0x01UL) == 0UL) {
-				vcpu_inject_gp(vcpu, 0U);
-			} else if ((val64 & XCR0_RESERVED_BITS) != 0UL) {
-				vcpu_inject_gp(vcpu, 0U);
-			} else {
-				/*
-				 * XCR0[2:1] (SSE state & AVX state) can't be
-				 * set to 10b as it is necessary to set both bits
-				 * to use AVX instructions.
-				 */
-				if ((val64 & (XCR0_SSE | XCR0_AVX)) == XCR0_AVX) {
-					vcpu_inject_gp(vcpu, 0U);
-				} else {
-					/*
-					 * SDM Vol.1 13-4, XCR0[4:3] are associated with MPX state,
-					 * Guest should not set these two bits without MPX support.
-					 */
-					if ((val64 & (XCR0_BNDREGS | XCR0_BNDCSR)) != 0UL) {
-						vcpu_inject_gp(vcpu, 0U);
-					} else {
-						write_xcr(0, val64);
+		if ((idx < NR_WORLD) && (cpl == 0U)) {
+			/* to access XCR0, 'ecx' should be 0 */
+			if ((vcpu_get_gpreg(vcpu, CPU_REG_RCX) & 0xffffffffUL) == 0UL) {
+				val64 = (vcpu_get_gpreg(vcpu, CPU_REG_RAX) & 0xffffffffUL) |
+						(vcpu_get_gpreg(vcpu, CPU_REG_RDX) << 32U);
+
+				/* bit 0 (x87 state) of XCR0 can't be cleared */
+				if (((val64 & 0x01UL) != 0UL) && ((val64 & XCR0_RESERVED_BITS) == 0UL)) {
+					/*
+					 * XCR0[2:1] (SSE state & AVX state) can't be
+					 * set to 10b as it is necessary to set both bits
+					 * to use AVX instructions.
+					 */
+					if ((val64 & (XCR0_SSE | XCR0_AVX)) != XCR0_AVX) {
+						/*
+						 * SDM Vol.1 13-4, XCR0[4:3] are associated with MPX state,
+						 * Guest should not set these two bits without MPX support.
+						 */
+						if ((val64 & (XCR0_BNDREGS | XCR0_BNDCSR)) == 0UL) {
+							write_xcr(0, val64);
+							ret = 0;
+						}
 					}
 				}
 			}
 		}
+	} else {
+		/* CPUID.01H:ECX.XSAVE[bit 26] = 0 */
+		vcpu_inject_ud(vcpu);
+		ret = 0;
 	}
+
 	return ret;
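The rewritten handler folds the previous cascade of vcpu_inject_gp() calls into a single "ret stays negative" path (and note the fix folded in above: the CR4.OSXSAVE test must use bitwise &, not logical &&). The XCR0 sanity rules it enforces can be restated as a pure function; bit positions follow SDM Vol.1, and the masks are stand-ins for ACRN's macros:

#include <stdbool.h>
#include <stdint.h>

#define XCR0_FPU     (1UL << 0)
#define XCR0_SSE     (1UL << 1)
#define XCR0_AVX     (1UL << 2)
#define XCR0_BNDREGS (1UL << 3)
#define XCR0_BNDCSR  (1UL << 4)

static bool xcr0_value_is_valid(uint64_t val, uint64_t reserved_bits)
{
	bool valid = true;

	if ((val & XCR0_FPU) == 0UL) {
		valid = false; /* the x87 state bit can never be cleared */
	} else if ((val & reserved_bits) != 0UL) {
		valid = false; /* reserved bits must stay zero */
	} else if ((val & (XCR0_SSE | XCR0_AVX)) == XCR0_AVX) {
		valid = false; /* AVX requires SSE: 10b is illegal in XCR0[2:1] */
	} else if ((val & (XCR0_BNDREGS | XCR0_BNDCSR)) != 0UL) {
		valid = false; /* MPX bits are rejected without MPX support */
	}
	return valid;
}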
@@ -903,11 +903,15 @@ int32_t wrmsr_vmexit_handler(struct acrn_vcpu *vcpu)
 	}
 	case MSR_IA32_XSS:
 	{
-		if ((v & ~(MSR_IA32_XSS_PT | MSR_IA32_XSS_HDC)) != 0UL) {
-			err = -EACCES;
+		if (vcpu->arch.xsave_enabled) {
+			if ((v & ~(MSR_IA32_XSS_PT | MSR_IA32_XSS_HDC)) != 0UL) {
+				err = -EACCES;
+			} else {
+				vcpu_set_guest_msr(vcpu, MSR_IA32_XSS, v);
+				msr_write(msr, v);
+			}
 		} else {
-			vcpu_set_guest_msr(vcpu, MSR_IA32_XSS, v);
-			msr_write(msr, v);
+			err = -EACCES;
 		}
 		break;
 	}
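The IA32_XSS case now refuses the write outright when XSAVE was never enabled for the vCPU; when it is enabled, only the Processor Trace and HDC supervisor-state bits may be set. A restatement of that filter as a sketch (bit positions per SDM Vol.4; illustrative, not ACRN's code):

#include <stdint.h>

#define MSR_IA32_XSS_PT  (1UL << 8)  /* Processor Trace supervisor state */
#define MSR_IA32_XSS_HDC (1UL << 13) /* HDC supervisor state */

/* Returns 0 if the guest value is acceptable, -1 (think -EACCES) otherwise. */
static int32_t validate_xss_write(uint64_t v)
{
	return ((v & ~(MSR_IA32_XSS_PT | MSR_IA32_XSS_HDC)) != 0UL) ? -1 : 0;
}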