Mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2025-06-23)
hv: remove xsave dependence
ACRN can run without the XSAVE capability, so drop the hard XSAVE dependence to support more (hardware or virtual) platforms.

Tracked-On: #6287
Signed-off-by: Fei Li <fei1.li@intel.com>
commit 20061b7c39
parent 84235bf07c
@@ -538,6 +538,7 @@ static void init_pcpu_xsave(void)
     uint64_t xcr0, xss;
     uint32_t eax, ecx, unused, xsave_area_size;

+    if (pcpu_has_cap(X86_FEATURE_XSAVE)) {
         CPU_CR_READ(cr4, &val64);
         val64 |= CR4_OSXSAVE;
         CPU_CR_WRITE(cr4, val64);
@@ -571,6 +572,7 @@ static void init_pcpu_xsave(void)
             }
         }
     }
+    }
 }

 static void init_keylocker(void)
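A note on the ordering the host-side init now makes explicit: CR4.OSXSAVE is only set when CPUID reports XSAVE, and it must be set before XCR0 is touched, because XGETBV/XSETBV raise #UD until the OS-support bit is on. A minimal sketch of that pair of instructions (hypothetical helpers for illustration; ACRN's own accessors are read_xcr/write_xcr):

#include <stdint.h>

/* Read an extended control register; legal only after CR4.OSXSAVE = 1. */
static inline uint64_t xgetbv(uint32_t xcr)
{
    uint32_t lo, hi;

    __asm__ volatile ("xgetbv" : "=a" (lo), "=d" (hi) : "c" (xcr));
    return ((uint64_t)hi << 32U) | lo;
}

/* Write an extended control register; same CR4.OSXSAVE precondition. */
static inline void xsetbv(uint32_t xcr, uint64_t val)
{
    __asm__ volatile ("xsetbv" : : "c" (xcr),
                      "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32U)));
}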
@@ -415,6 +415,26 @@ static inline bool pcpu_has_vmx_unrestricted_guest_cap(void)
     return ((msr_read(MSR_IA32_VMX_MISC) & MSR_IA32_MISC_UNRESTRICTED_GUEST) != 0UL);
 }

+static bool is_valid_xsave_combination(void)
+{
+    uint64_t value64 = msr_read(MSR_IA32_VMX_PROCBASED_CTLS2);
+    uint32_t high = (uint32_t)(value64 >> 32U); /* allowed 1-settings */
+    bool ret;
+
+    /* Now we only assume the platform must support XSAVE on CPU side and XSVE_XRSTR on VMX side or not,
+     * in this case, we could pass through CR4.OSXSAVE bit.
+     */
+    if (pcpu_has_cap(X86_FEATURE_XSAVE)) {
+        ret = pcpu_has_cap(X86_FEATURE_XSAVES) && pcpu_has_cap(X86_FEATURE_COMPACTION_EXT) &&
+            ((high & VMX_PROCBASED_CTLS2_XSVE_XRSTR) != 0U);
+    } else {
+        ret = !pcpu_has_cap(X86_FEATURE_XSAVES) && !pcpu_has_cap(X86_FEATURE_COMPACTION_EXT) &&
+            ((high & VMX_PROCBASED_CTLS2_XSVE_XRSTR) == 0U);
+    }
+
+    return ret;
+}
+
 static int32_t check_vmx_mmu_cap(void)
 {
     int32_t ret = 0;
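Why the new helper only looks at bits 63:32 of the capability MSR: per the SDM, each VMX capability MSR packs the allowed 0-settings in its low 32 bits and the allowed 1-settings in its high 32 bits, so a secondary processor-based control is available iff its bit appears in the high word. A sketch of that test (helper name is hypothetical):

#include <stdbool.h>
#include <stdint.h>

/* A VMX secondary control can be enabled iff its bit is set in the
 * allowed-1 half (bits 63:32) of MSR_IA32_VMX_PROCBASED_CTLS2.
 */
static bool vmx_ctrl2_can_be_set(uint64_t procbased_ctls2_msr, uint32_t ctrl)
{
    uint32_t allowed1 = (uint32_t)(procbased_ctls2_msr >> 32U);

    return (allowed1 & ctrl) != 0U;
}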
@@ -519,18 +539,15 @@ int32_t detect_hardware_support(void)
     } else if (!pcpu_has_cap(X86_FEATURE_POPCNT)) {
         printf("%s, popcnt instruction not supported\n", __func__);
         ret = -ENODEV;
-    } else if (!pcpu_has_cap(X86_FEATURE_XSAVES)) {
-        printf("%s, XSAVES not supported\n", __func__);
-        ret = -ENODEV;
     } else if (!pcpu_has_cap(X86_FEATURE_SSE)) {
         printf("%s, SSE not supported\n", __func__);
         ret = -ENODEV;
-    } else if (!pcpu_has_cap(X86_FEATURE_COMPACTION_EXT)) {
-        printf("%s, Compaction extensions in XSAVE is not supported\n", __func__);
-        ret = -ENODEV;
     } else if (!pcpu_has_cap(X86_FEATURE_RDRAND)) {
         printf("%s, RDRAND is not supported\n", __func__);
         ret = -ENODEV;
+    } else if (!is_valid_xsave_combination()) {
+        printf("%s, check XSAVE combined Capability failed\n", __func__);
+        ret = -ENODEV;
     } else {
         ret = check_vmx_mmu_cap();
     }
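The individually mandatory XSAVES and COMPACTION_EXT checks are replaced by a single consistency check: either the platform offers the whole XSAVE feature set (XSAVE, XSAVES, the compacted format, and the VMX enable-XSAVES/XRSTORS control) or none of it; mixed platforms are still rejected. The policy, distilled into a standalone predicate for clarity:

#include <stdbool.h>

/* All four capabilities must be present together, or all absent. */
static bool xsave_combination_ok(bool xsave, bool xsaves,
                                 bool compaction_ext, bool vmx_xsve_xrstr)
{
    return (xsave && xsaves && compaction_ext && vmx_xsve_xrstr) ||
           (!xsave && !xsaves && !compaction_ext && !vmx_xsve_xrstr);
}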
@@ -314,6 +314,10 @@ static void init_xsave(struct acrn_vcpu *vcpu)
     struct ext_context *ectx = &(vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx);
     struct xsave_area *area = &ectx->xs_area;

+    /* if the HW has this cap, we need to prepare the buffer for potential save/restore.
+     * Guest may or may not enable XSAVE -- it doesn't matter.
+     */
+    if (pcpu_has_cap(X86_FEATURE_XSAVE)) {
         ectx->xcr0 = XSAVE_FPU;
         (void)memset((void *)area, 0U, XSAVE_STATE_AREA_SIZE);

@@ -321,6 +325,7 @@ static void init_xsave(struct acrn_vcpu *vcpu)
          * keep the reset area in header area as zero.
          */
         ectx->xs_area.xsave_hdr.hdr.xcomp_bv |= XSAVE_COMPACTED_FORMAT;
+    }
 }

 void set_vcpu_regs(struct acrn_vcpu *vcpu, struct acrn_regs *vcpu_regs)
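Setting XSAVE_COMPACTED_FORMAT in the header matters because XRSTORS only accepts the compacted layout: per the SDM, bit 63 of the XSAVE header's XCOMP_BV field is the format flag, which is presumably what the ACRN macro encodes. A sketch under that assumption:

#include <stdint.h>

#define XCOMP_BV_COMPACTED (1UL << 63U)  /* bit 63 of XCOMP_BV, per SDM */

/* XRSTORS faults on a save area whose XCOMP_BV has bit 63 clear, so the
 * area must be marked compacted before it is ever restored from.
 */
static void mark_area_compacted(uint64_t *xcomp_bv)
{
    *xcomp_bv |= XCOMP_BV_COMPACTED;
}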
@@ -827,19 +832,23 @@ void zombie_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state)
     }
 }

-void save_xsave_area(__unused struct acrn_vcpu *vcpu, struct ext_context *ectx)
+void save_xsave_area(struct acrn_vcpu *vcpu, struct ext_context *ectx)
 {
+    if (vcpu->arch.xsave_enabled) {
         ectx->xcr0 = read_xcr(0);
         write_xcr(0, ectx->xcr0 | XSAVE_SSE);
         xsaves(&ectx->xs_area, UINT64_MAX);
+    }
 }

 void rstore_xsave_area(const struct acrn_vcpu *vcpu, const struct ext_context *ectx)
 {
+    if (vcpu->arch.xsave_enabled) {
         write_xcr(0, ectx->xcr0 | XSAVE_SSE);
         msr_write(MSR_IA32_XSS, vcpu_get_guest_msr(vcpu, MSR_IA32_XSS));
         xrstors(&ectx->xs_area, UINT64_MAX);
         write_xcr(0, ectx->xcr0);
+    }
 }

 /* TODO:
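A word on why the handlers widen XCR0 with XSAVE_SSE around XSAVES/XRSTORS: those instructions only transfer the state components in the requested-feature bitmap intersected with what XCR0 and IA32_XSS enable, so passing UINT64_MAX as the bitmap saves exactly the currently enabled components. A sketch of that rule (a simplified reading of SDM Vol.1 Ch.13):

#include <stdint.h>

/* The state components XSAVES actually transfers: the requested-feature
 * bitmap ANDed with the union of XCR0 and IA32_XSS.  This is why
 * UINT64_MAX is a safe bitmap argument above.
 */
static uint64_t xsaves_components(uint64_t rfbm, uint64_t xcr0, uint64_t xss)
{
    return rfbm & (xcr0 | xss);
}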
@@ -321,7 +321,7 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
      */
     value32 = check_vmx_ctrl(MSR_IA32_VMX_PROCBASED_CTLS2,
             VMX_PROCBASED_CTLS2_VAPIC | VMX_PROCBASED_CTLS2_EPT |VMX_PROCBASED_CTLS2_VPID |
-            VMX_PROCBASED_CTLS2_RDTSCP | VMX_PROCBASED_CTLS2_UNRESTRICT |
+            VMX_PROCBASED_CTLS2_RDTSCP | VMX_PROCBASED_CTLS2_UNRESTRICT | VMX_PROCBASED_CTLS2_XSVE_XRSTR |
             VMX_PROCBASED_CTLS2_PAUSE_LOOP | VMX_PROCBASED_CTLS2_UWAIT_PAUSE);

 /* SDM Vol3, 25.3, setting "enable INVPCID" VM-execution to 1 with "INVLPG exiting" disabled,
@@ -348,9 +348,11 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
         exec_vmwrite32(VMX_TPR_THRESHOLD, 0U);
     }

-    if (pcpu_has_cap(X86_FEATURE_OSXSAVE)) {
+    if ((value32 & VMX_PROCBASED_CTLS2_XSVE_XRSTR) != 0U) {
         exec_vmwrite64(VMX_XSS_EXITING_BITMAP_FULL, 0UL);
-        value32 |= VMX_PROCBASED_CTLS2_XSVE_XRSTR;
+        vcpu->arch.xsave_enabled = true;
+    } else {
+        value32 &= ~VMX_PROCBASED_CTLS2_XSVE_XRSTR;
     }

     value32 |= VMX_PROCBASED_CTLS2_WBINVD;
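The guest's XSAVE path is now keyed off whether the "enable XSAVES/XRSTORS" control actually survived check_vmx_ctrl(), rather than off the host's OSXSAVE bit. A sketch of the filtering such a helper performs (a simplified assumption about ACRN's implementation, shown only to explain why testing the returned value32 is sufficient):

#include <stdint.h>

/* Force the must-be-one bits (allowed-0 settings, low word) and drop
 * any requested bit the hardware cannot set (allowed-1 settings, high
 * word).  The caller can then test the result for what was granted.
 */
static uint32_t filter_vmx_ctrl(uint64_t cap_msr, uint32_t requested)
{
    uint32_t allowed0 = (uint32_t)cap_msr;          /* must be 1 */
    uint32_t allowed1 = (uint32_t)(cap_msr >> 32U); /* may be 1 */

    return (requested | allowed0) & allowed1;
}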
@@ -392,48 +392,47 @@ int32_t cpuid_vmexit_handler(struct acrn_vcpu *vcpu)
  */
 static int32_t xsetbv_vmexit_handler(struct acrn_vcpu *vcpu)
 {
-    int32_t idx;
+    int32_t idx, ret = -1; /* ret < 0 call vcpu_inject_gp(vcpu, 0U) */
+    uint32_t cpl;
     uint64_t val64;
-    int32_t ret = 0;

+    if (vcpu->arch.xsave_enabled && ((vcpu_get_cr4(vcpu) && CR4_OSXSAVE) != 0UL)) {
         idx = vcpu->arch.cur_context;
-    if (idx >= NR_WORLD) {
-        ret = -1;
-    } else {
+        /* get current privilege level */
+        cpl = exec_vmread32(VMX_GUEST_CS_ATTR);
+        cpl = (cpl >> 5U) & 3U;
+
+        if ((idx < NR_WORLD) && (cpl == 0U)) {
             /* to access XCR0,'ecx' should be 0 */
-        if ((vcpu_get_gpreg(vcpu, CPU_REG_RCX) & 0xffffffffUL) != 0UL) {
-            vcpu_inject_gp(vcpu, 0U);
-        } else {
+            if ((vcpu_get_gpreg(vcpu, CPU_REG_RCX) & 0xffffffffUL) == 0UL) {
                 val64 = (vcpu_get_gpreg(vcpu, CPU_REG_RAX) & 0xffffffffUL) |
                         (vcpu_get_gpreg(vcpu, CPU_REG_RDX) << 32U);

                 /* bit 0(x87 state) of XCR0 can't be cleared */
-            if ((val64 & 0x01UL) == 0UL) {
-                vcpu_inject_gp(vcpu, 0U);
-            } else if ((val64 & XCR0_RESERVED_BITS) != 0UL) {
-                vcpu_inject_gp(vcpu, 0U);
-            } else {
+                if (((val64 & 0x01UL) != 0UL) && ((val64 & XCR0_RESERVED_BITS) == 0UL)) {
                     /*
                      * XCR0[2:1] (SSE state & AVX state) can't not be
                      * set to 10b as it is necessary to set both bits
                      * to use AVX instructions.
                      */
-                if ((val64 & (XCR0_SSE | XCR0_AVX)) == XCR0_AVX) {
-                    vcpu_inject_gp(vcpu, 0U);
-                } else {
+                    if ((val64 & (XCR0_SSE | XCR0_AVX)) != XCR0_AVX) {
                         /*
                          * SDM Vol.1 13-4, XCR0[4:3] are associated with MPX state,
                          * Guest should not set these two bits without MPX support.
                          */
-                    if ((val64 & (XCR0_BNDREGS | XCR0_BNDCSR)) != 0UL) {
-                        vcpu_inject_gp(vcpu, 0U);
-                    } else {
+                        if ((val64 & (XCR0_BNDREGS | XCR0_BNDCSR)) == 0UL) {
                             write_xcr(0, val64);
+                            ret = 0;
                         }
                     }
                 }
             }
         }
+    } else {
+        /* CPUID.01H:ECX.XSAVE[bit 26] = 0 */
+        vcpu_inject_ud(vcpu);
+        ret = 0;
+    }

     return ret;
 }
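Two details in this hunk are worth unpacking. First, the CPL check: the VMCS guest CS access-rights field mirrors the segment descriptor's attribute byte, whose bits 6:5 hold the DPL, and for CS the DPL equals the current privilege level, hence (cpl >> 5U) & 3U. Second, the gating condition (vcpu_get_cr4(vcpu) && CR4_OSXSAVE) uses a logical AND where a bitwise & is presumably intended; as written it only tests that CR4 is non-zero. A sketch of the CPL extraction:

#include <stdint.h>

/* Bits 6:5 of the CS access-rights field are the DPL, and for the code
 * segment DPL == CPL (0 = kernel, 3 = user).
 */
static uint32_t cpl_from_cs_attr(uint32_t cs_attr)
{
    return (cs_attr >> 5U) & 3U;
}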
@@ -903,12 +903,16 @@ int32_t wrmsr_vmexit_handler(struct acrn_vcpu *vcpu)
     }
     case MSR_IA32_XSS:
     {
+        if (vcpu->arch.xsave_enabled) {
             if ((v & ~(MSR_IA32_XSS_PT | MSR_IA32_XSS_HDC)) != 0UL) {
                 err = -EACCES;
             } else {
                 vcpu_set_guest_msr(vcpu, MSR_IA32_XSS, v);
                 msr_write(msr, v);
             }
+        } else {
+            err = -EACCES;
+        }
         break;
     }
     case MSR_IA32_COPY_LOCAL_TO_PLATFORM:
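Guest writes to IA32_XSS are now accepted only when the vCPU has XSAVE enabled, and even then only for the two supervisor state components ACRN virtualizes. Per the SDM, PT state is bit 8 and HDC state is bit 13 of IA32_XSS, which is presumably what the two ACRN masks encode. A sketch of the filter under that assumption:

#include <stdbool.h>
#include <stdint.h>

#define XSS_PT  (1UL << 8U)   /* Processor Trace state, SDM Vol.4 */
#define XSS_HDC (1UL << 13U)  /* HDC state */

/* Reject the write if any bit outside the virtualized set is requested. */
static bool xss_write_allowed(uint64_t v)
{
    return (v & ~(XSS_PT | XSS_HDC)) == 0UL;
}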
@@ -253,6 +253,7 @@ struct acrn_vcpu_arch {
     uint8_t lapic_mask;
     bool irq_window_enabled;
     bool emulating_lock;
+    bool xsave_enabled;
     uint32_t nrexits;

     /* VCPU context state information */