Mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2025-06-22 13:37:10 +00:00)
hv: remove xsave dependence
ACRN could run without XSAVE Capability. So remove XSAVE dependence to support more (hardware or virtual) platforms.

Tracked-On: #6287
Signed-off-by: Fei Li <fei1.li@intel.com>
This commit is contained in:
parent 84235bf07c
commit 20061b7c39
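
The change hinges on probing XSAVE support at runtime instead of requiring it. XSAVE availability is reported by CPUID.01H:ECX[26] (ECX[27], OSXSAVE, only reads 1 once CR4.OSXSAVE has been set), which is what the pcpu_has_cap(X86_FEATURE_XSAVE) checks in the hunks below ultimately rely on. A rough standalone sketch of such a probe, using GCC's __get_cpuid rather than ACRN's cpuid_subleaf wrapper:

#include <stdbool.h>
#include <cpuid.h>   /* GCC/Clang __get_cpuid() */

/* Minimal sketch, not ACRN code: report whether the CPU supports XSAVE.
 * CPUID.01H:ECX bit 26 is the XSAVE feature flag; bit 27 (OSXSAVE) only
 * becomes 1 after the OS/hypervisor sets CR4.OSXSAVE. */
static bool cpu_supports_xsave(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (__get_cpuid(1U, &eax, &ebx, &ecx, &edx) == 0) {
        return false;                     /* CPUID leaf 1 unavailable */
    }
    return ((ecx >> 26U) & 1U) != 0U;     /* CPUID.01H:ECX.XSAVE */
}
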
@@ -538,36 +538,38 @@ static void init_pcpu_xsave(void)
     uint64_t xcr0, xss;
     uint32_t eax, ecx, unused, xsave_area_size;
 
-    CPU_CR_READ(cr4, &val64);
-    val64 |= CR4_OSXSAVE;
-    CPU_CR_WRITE(cr4, val64);
+    if (pcpu_has_cap(X86_FEATURE_XSAVE)) {
+        CPU_CR_READ(cr4, &val64);
+        val64 |= CR4_OSXSAVE;
+        CPU_CR_WRITE(cr4, val64);
 
-    if (get_pcpu_id() == BSP_CPU_ID) {
-        cpuid_subleaf(CPUID_FEATURES, 0x0U, &unused, &unused, &ecx, &unused);
+        if (get_pcpu_id() == BSP_CPU_ID) {
+            cpuid_subleaf(CPUID_FEATURES, 0x0U, &unused, &unused, &ecx, &unused);
 
-        /* if set, update it */
-        if ((ecx & CPUID_ECX_OSXSAVE) != 0U) {
-            cpu_info = get_pcpu_info();
-            cpu_info->cpuid_leaves[FEAT_1_ECX] |= CPUID_ECX_OSXSAVE;
+            /* if set, update it */
+            if ((ecx & CPUID_ECX_OSXSAVE) != 0U) {
+                cpu_info = get_pcpu_info();
+                cpu_info->cpuid_leaves[FEAT_1_ECX] |= CPUID_ECX_OSXSAVE;
 
-            /* set xcr0 and xss with the componets bitmap get from cpuid */
-            xcr0 = ((uint64_t)cpu_info->cpuid_leaves[FEAT_D_0_EDX] << 32U)
-                + cpu_info->cpuid_leaves[FEAT_D_0_EAX];
-            xss = ((uint64_t)cpu_info->cpuid_leaves[FEAT_D_1_EDX] << 32U)
-                + cpu_info->cpuid_leaves[FEAT_D_1_ECX];
-            write_xcr(0, xcr0);
-            msr_write(MSR_IA32_XSS, xss);
+                /* set xcr0 and xss with the componets bitmap get from cpuid */
+                xcr0 = ((uint64_t)cpu_info->cpuid_leaves[FEAT_D_0_EDX] << 32U)
+                    + cpu_info->cpuid_leaves[FEAT_D_0_EAX];
+                xss = ((uint64_t)cpu_info->cpuid_leaves[FEAT_D_1_EDX] << 32U)
+                    + cpu_info->cpuid_leaves[FEAT_D_1_ECX];
+                write_xcr(0, xcr0);
+                msr_write(MSR_IA32_XSS, xss);
 
-            /* get xsave area size, containing all the state components
-             * corresponding to bits currently set in XCR0 | IA32_XSS */
-            cpuid_subleaf(CPUID_XSAVE_FEATURES, 1U,
-                &eax,
-                &xsave_area_size,
-                &ecx,
-                &unused);
-            if (xsave_area_size > XSAVE_STATE_AREA_SIZE) {
-                panic("XSAVE area (%d bytes) exceeds the pre-allocated 4K region\n",
-                    xsave_area_size);
-            }
-        }
-    }
+                /* get xsave area size, containing all the state components
+                 * corresponding to bits currently set in XCR0 | IA32_XSS */
+                cpuid_subleaf(CPUID_XSAVE_FEATURES, 1U,
+                    &eax,
+                    &xsave_area_size,
+                    &ecx,
+                    &unused);
+                if (xsave_area_size > XSAVE_STATE_AREA_SIZE) {
+                    panic("XSAVE area (%d bytes) exceeds the pre-allocated 4K region\n",
+                        xsave_area_size);
+                }
+            }
+        }
+    }
 }
@@ -415,6 +415,26 @@ static inline bool pcpu_has_vmx_unrestricted_guest_cap(void)
     return ((msr_read(MSR_IA32_VMX_MISC) & MSR_IA32_MISC_UNRESTRICTED_GUEST) != 0UL);
 }
 
+static bool is_valid_xsave_combination(void)
+{
+    uint64_t value64 = msr_read(MSR_IA32_VMX_PROCBASED_CTLS2);
+    uint32_t high = (uint32_t)(value64 >> 32U); /* allowed 1-settings */
+    bool ret;
+
+    /* Now we only assume the platform must support XSAVE on CPU side and XSVE_XRSTR on VMX side or not,
+     * in this case, we could pass through CR4.OSXSAVE bit.
+     */
+    if (pcpu_has_cap(X86_FEATURE_XSAVE)) {
+        ret = pcpu_has_cap(X86_FEATURE_XSAVES) && pcpu_has_cap(X86_FEATURE_COMPACTION_EXT) &&
+            ((high & VMX_PROCBASED_CTLS2_XSVE_XRSTR) != 0U);
+    } else {
+        ret = !pcpu_has_cap(X86_FEATURE_XSAVES) && !pcpu_has_cap(X86_FEATURE_COMPACTION_EXT) &&
+            ((high & VMX_PROCBASED_CTLS2_XSVE_XRSTR) == 0U);
+    }
+
+    return ret;
+}
+
 static int32_t check_vmx_mmu_cap(void)
 {
     int32_t ret = 0;
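
is_valid_xsave_combination() relies on how VMX capability MSRs encode supported controls: for IA32_VMX_PROCBASED_CTLS2, bits 63:32 are the allowed 1-settings (a secondary control such as "enable XSAVES/XRSTORS" may only be set to 1 if its bit is 1 there), while bits 31:0 are the allowed 0-settings. A small sketch of that decoding, assuming VMX_PROCBASED_CTLS2_XSVE_XRSTR is the control's bit mask as in the diff:

#include <stdbool.h>
#include <stdint.h>

/* Sketch, not ACRN code: can a secondary processor-based VM-execution
 * control be set to 1?  Per the SDM, the high dword of the capability
 * MSR lists the allowed 1-settings. */
static bool vmx_secondary_ctrl_supported(uint64_t ia32_vmx_procbased_ctls2, uint32_t ctrl_mask)
{
    uint32_t allowed_1 = (uint32_t)(ia32_vmx_procbased_ctls2 >> 32U);

    return (allowed_1 & ctrl_mask) != 0U;
}

With that reading, the function above simply demands that XSAVE, XSAVES, compaction extensions and the XSVE_XRSTR execution control are either all present or all absent.
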
@@ -519,18 +539,15 @@ int32_t detect_hardware_support(void)
     } else if (!pcpu_has_cap(X86_FEATURE_POPCNT)) {
         printf("%s, popcnt instruction not supported\n", __func__);
         ret = -ENODEV;
-    } else if (!pcpu_has_cap(X86_FEATURE_XSAVES)) {
-        printf("%s, XSAVES not supported\n", __func__);
-        ret = -ENODEV;
     } else if (!pcpu_has_cap(X86_FEATURE_SSE)) {
         printf("%s, SSE not supported\n", __func__);
         ret = -ENODEV;
-    } else if (!pcpu_has_cap(X86_FEATURE_COMPACTION_EXT)) {
-        printf("%s, Compaction extensions in XSAVE is not supported\n", __func__);
-        ret = -ENODEV;
     } else if (!pcpu_has_cap(X86_FEATURE_RDRAND)) {
         printf("%s, RDRAND is not supported\n", __func__);
         ret = -ENODEV;
+    } else if (!is_valid_xsave_combination()) {
+        printf("%s, check XSAVE combined Capability failed\n", __func__);
+        ret = -ENODEV;
     } else {
         ret = check_vmx_mmu_cap();
     }
@@ -314,13 +314,18 @@ static void init_xsave(struct acrn_vcpu *vcpu)
     struct ext_context *ectx = &(vcpu->arch.contexts[vcpu->arch.cur_context].ext_ctx);
     struct xsave_area *area = &ectx->xs_area;
 
-    ectx->xcr0 = XSAVE_FPU;
-    (void)memset((void *)area, 0U, XSAVE_STATE_AREA_SIZE);
-
-    /* xsaves only support compacted format, so set it in xcomp_bv[63],
-     * keep the reset area in header area as zero.
+    /* if the HW has this cap, we need to prepare the buffer for potential save/restore.
+     * Guest may or may not enable XSAVE -- it doesn't matter.
      */
-    ectx->xs_area.xsave_hdr.hdr.xcomp_bv |= XSAVE_COMPACTED_FORMAT;
+    if (pcpu_has_cap(X86_FEATURE_XSAVE)) {
+        ectx->xcr0 = XSAVE_FPU;
+        (void)memset((void *)area, 0U, XSAVE_STATE_AREA_SIZE);
+
+        /* xsaves only support compacted format, so set it in xcomp_bv[63],
+         * keep the reset area in header area as zero.
+         */
+        ectx->xs_area.xsave_hdr.hdr.xcomp_bv |= XSAVE_COMPACTED_FORMAT;
+    }
 }
 
 void set_vcpu_regs(struct acrn_vcpu *vcpu, struct acrn_regs *vcpu_regs)
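
The xcomp_bv update above matters because XSAVES/XRSTORS use the compacted XSAVE format, signalled by bit 63 of the header's XCOMP_BV field; pre-setting it in the zeroed area keeps a later XRSTORS from faulting on a freshly initialized buffer. A sketch of the header layout this relies on (field names illustrative, per SDM Vol. 1; XSAVE_COMPACTED_FORMAT in the diff is assumed to be bit 63):

#include <stdint.h>

/* Sketch of the 64-byte XSAVE header that starts at offset 512 of the
 * XSAVE area (SDM Vol. 1).  Field names here are illustrative only. */
struct xsave_header_sketch {
    uint64_t xstate_bv;      /* components with live (non-init) state */
    uint64_t xcomp_bv;       /* bit 63 = compacted format, bits 62:0 = layout */
    uint64_t reserved[6];
};

#define XSAVE_HDR_COMPACTED   (1UL << 63U)   /* assumed value of XSAVE_COMPACTED_FORMAT */
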
@@ -827,19 +832,23 @@ void zombie_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state)
     }
 }
 
-void save_xsave_area(__unused struct acrn_vcpu *vcpu, struct ext_context *ectx)
+void save_xsave_area(struct acrn_vcpu *vcpu, struct ext_context *ectx)
 {
-    ectx->xcr0 = read_xcr(0);
-    write_xcr(0, ectx->xcr0 | XSAVE_SSE);
-    xsaves(&ectx->xs_area, UINT64_MAX);
+    if (vcpu->arch.xsave_enabled) {
+        ectx->xcr0 = read_xcr(0);
+        write_xcr(0, ectx->xcr0 | XSAVE_SSE);
+        xsaves(&ectx->xs_area, UINT64_MAX);
+    }
 }
 
 void rstore_xsave_area(const struct acrn_vcpu *vcpu, const struct ext_context *ectx)
 {
-    write_xcr(0, ectx->xcr0 | XSAVE_SSE);
-    msr_write(MSR_IA32_XSS, vcpu_get_guest_msr(vcpu, MSR_IA32_XSS));
-    xrstors(&ectx->xs_area, UINT64_MAX);
-    write_xcr(0, ectx->xcr0);
+    if (vcpu->arch.xsave_enabled) {
+        write_xcr(0, ectx->xcr0 | XSAVE_SSE);
+        msr_write(MSR_IA32_XSS, vcpu_get_guest_msr(vcpu, MSR_IA32_XSS));
+        xrstors(&ectx->xs_area, UINT64_MAX);
+        write_xcr(0, ectx->xcr0);
+    }
 }
 
 /* TODO:
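
In save_xsave_area(), XCR0 is temporarily raised with XSAVE_SSE before XSAVES runs with an all-ones instruction mask: XSAVES only stores the components selected by EDX:EAX that are also enabled in XCR0 | IA32_XSS, so enabling SSE first guarantees FPU/SSE state actually lands in the area. Roughly, the effective mask is:

#include <stdint.h>

/* Sketch, per SDM Vol. 1 "Operation of XSAVES": the state components
 * written by XSAVES are the requested mask intersected with what
 * XCR0/IA32_XSS currently enable. */
static uint64_t xsaves_effective_mask(uint64_t requested, uint64_t xcr0, uint64_t ia32_xss)
{
    return requested & (xcr0 | ia32_xss);
}
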
@@ -321,7 +321,7 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
      */
     value32 = check_vmx_ctrl(MSR_IA32_VMX_PROCBASED_CTLS2,
             VMX_PROCBASED_CTLS2_VAPIC | VMX_PROCBASED_CTLS2_EPT | VMX_PROCBASED_CTLS2_VPID |
-            VMX_PROCBASED_CTLS2_RDTSCP | VMX_PROCBASED_CTLS2_UNRESTRICT |
+            VMX_PROCBASED_CTLS2_RDTSCP | VMX_PROCBASED_CTLS2_UNRESTRICT | VMX_PROCBASED_CTLS2_XSVE_XRSTR |
             VMX_PROCBASED_CTLS2_PAUSE_LOOP | VMX_PROCBASED_CTLS2_UWAIT_PAUSE);
 
     /* SDM Vol3, 25.3, setting "enable INVPCID" VM-execution to 1 with "INVLPG exiting" disabled,
@@ -348,9 +348,11 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
         exec_vmwrite32(VMX_TPR_THRESHOLD, 0U);
     }
 
-    if (pcpu_has_cap(X86_FEATURE_OSXSAVE)) {
+    if ((value32 & VMX_PROCBASED_CTLS2_XSVE_XRSTR) != 0U) {
         exec_vmwrite64(VMX_XSS_EXITING_BITMAP_FULL, 0UL);
-        value32 |= VMX_PROCBASED_CTLS2_XSVE_XRSTR;
+        vcpu->arch.xsave_enabled = true;
+    } else {
+        value32 &= ~VMX_PROCBASED_CTLS2_XSVE_XRSTR;
     }
 
     value32 |= VMX_PROCBASED_CTLS2_WBINVD;
@@ -392,47 +392,46 @@ int32_t cpuid_vmexit_handler(struct acrn_vcpu *vcpu)
  */
 static int32_t xsetbv_vmexit_handler(struct acrn_vcpu *vcpu)
 {
-    int32_t idx;
-    uint64_t val64;
-    int32_t ret = 0;
-
-    idx = vcpu->arch.cur_context;
-    if (idx >= NR_WORLD) {
-        ret = -1;
-    } else {
-        /* to access XCR0,'ecx' should be 0 */
-        if ((vcpu_get_gpreg(vcpu, CPU_REG_RCX) & 0xffffffffUL) != 0UL) {
-            vcpu_inject_gp(vcpu, 0U);
-        } else {
-            val64 = (vcpu_get_gpreg(vcpu, CPU_REG_RAX) & 0xffffffffUL) |
-                    (vcpu_get_gpreg(vcpu, CPU_REG_RDX) << 32U);
-
-            /* bit 0(x87 state) of XCR0 can't be cleared */
-            if ((val64 & 0x01UL) == 0UL) {
-                vcpu_inject_gp(vcpu, 0U);
-            } else if ((val64 & XCR0_RESERVED_BITS) != 0UL) {
-                vcpu_inject_gp(vcpu, 0U);
-            } else {
-                /*
-                 * XCR0[2:1] (SSE state & AVX state) can't not be
-                 * set to 10b as it is necessary to set both bits
-                 * to use AVX instructions.
-                 */
-                if ((val64 & (XCR0_SSE | XCR0_AVX)) == XCR0_AVX) {
-                    vcpu_inject_gp(vcpu, 0U);
-                } else {
-                    /*
-                     * SDM Vol.1 13-4, XCR0[4:3] are associated with MPX state,
-                     * Guest should not set these two bits without MPX support.
-                     */
-                    if ((val64 & (XCR0_BNDREGS | XCR0_BNDCSR)) != 0UL) {
-                        vcpu_inject_gp(vcpu, 0U);
-                    } else {
-                        write_xcr(0, val64);
-                    }
-                }
-            }
-        }
-    }
+    int32_t idx, ret = -1; /* ret < 0 call vcpu_inject_gp(vcpu, 0U) */
+    uint32_t cpl;
+    uint64_t val64;
+
+    if (vcpu->arch.xsave_enabled && ((vcpu_get_cr4(vcpu) && CR4_OSXSAVE) != 0UL)) {
+        idx = vcpu->arch.cur_context;
+        /* get current privilege level */
+        cpl = exec_vmread32(VMX_GUEST_CS_ATTR);
+        cpl = (cpl >> 5U) & 3U;
+
+        if ((idx < NR_WORLD) && (cpl == 0U)) {
+            /* to access XCR0,'ecx' should be 0 */
+            if ((vcpu_get_gpreg(vcpu, CPU_REG_RCX) & 0xffffffffUL) == 0UL) {
+                val64 = (vcpu_get_gpreg(vcpu, CPU_REG_RAX) & 0xffffffffUL) |
+                        (vcpu_get_gpreg(vcpu, CPU_REG_RDX) << 32U);
+
+                /* bit 0(x87 state) of XCR0 can't be cleared */
+                if (((val64 & 0x01UL) != 0UL) && ((val64 & XCR0_RESERVED_BITS) == 0UL)) {
+                    /*
+                     * XCR0[2:1] (SSE state & AVX state) can't not be
+                     * set to 10b as it is necessary to set both bits
+                     * to use AVX instructions.
+                     */
+                    if ((val64 & (XCR0_SSE | XCR0_AVX)) != XCR0_AVX) {
+                        /*
+                         * SDM Vol.1 13-4, XCR0[4:3] are associated with MPX state,
+                         * Guest should not set these two bits without MPX support.
+                         */
+                        if ((val64 & (XCR0_BNDREGS | XCR0_BNDCSR)) == 0UL) {
+                            write_xcr(0, val64);
+                            ret = 0;
+                        }
+                    }
+                }
+            }
+        }
+    } else {
+        /* CPUID.01H:ECX.XSAVE[bit 26] = 0 */
+        vcpu_inject_ud(vcpu);
+        ret = 0;
+    }
 
     return ret;
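
The rewritten handler only executes write_xcr() once every architectural constraint on the new XCR0 value holds, and otherwise leaves ret negative so that #GP(0) is injected. The value checks can be read as one predicate; a consolidated sketch with mask values assumed from the SDM (the diff's XCR0_* identifiers are taken to mean these bits):

#include <stdbool.h>
#include <stdint.h>

/* Assumed XCR0 bit layout (SDM Vol. 1): these mirror the XCR0_* names
 * used in the diff but are defined here only for illustration. */
#define XCR0_SKETCH_FPU       (1UL << 0U)
#define XCR0_SKETCH_SSE       (1UL << 1U)
#define XCR0_SKETCH_AVX       (1UL << 2U)
#define XCR0_SKETCH_BNDREGS   (1UL << 3U)
#define XCR0_SKETCH_BNDCSR    (1UL << 4U)

/* Sketch of the xsetbv value checks: x87 must stay enabled, no reserved
 * bits, AVX only together with SSE, and no MPX bits since MPX is not
 * exposed to guests. */
static bool xcr0_value_ok(uint64_t val, uint64_t reserved_mask)
{
    return ((val & XCR0_SKETCH_FPU) != 0UL) &&
           ((val & reserved_mask) == 0UL) &&
           ((val & (XCR0_SKETCH_SSE | XCR0_SKETCH_AVX)) != XCR0_SKETCH_AVX) &&
           ((val & (XCR0_SKETCH_BNDREGS | XCR0_SKETCH_BNDCSR)) == 0UL);
}
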
@@ -903,11 +903,15 @@ int32_t wrmsr_vmexit_handler(struct acrn_vcpu *vcpu)
     }
     case MSR_IA32_XSS:
     {
-        if ((v & ~(MSR_IA32_XSS_PT | MSR_IA32_XSS_HDC)) != 0UL) {
-            err = -EACCES;
+        if (vcpu->arch.xsave_enabled) {
+            if ((v & ~(MSR_IA32_XSS_PT | MSR_IA32_XSS_HDC)) != 0UL) {
+                err = -EACCES;
+            } else {
+                vcpu_set_guest_msr(vcpu, MSR_IA32_XSS, v);
+                msr_write(msr, v);
+            }
         } else {
-            vcpu_set_guest_msr(vcpu, MSR_IA32_XSS, v);
-            msr_write(msr, v);
+            err = -EACCES;
         }
         break;
     }
@@ -253,6 +253,7 @@ struct acrn_vcpu_arch {
     uint8_t lapic_mask;
     bool irq_window_enabled;
     bool emulating_lock;
+    bool xsave_enabled;
     uint32_t nrexits;
 
     /* VCPU context state information */