hv: remove checks for 2 VMX capability bits

Remove the is_valid_xsave_combination API, assuming that the hardware
or QEMU guarantees that XSAVE on the CPU side and XSVE_XRSTR on the
VMX side are either both supported or both unsupported.
An offline tool will be added for the QEMU platform to prevent users
from applying wrong XSAVE configurations.
Remove the check of VMX_PROCBASED_CTLS2_XSVE_XRSTR for the same reason.
For VMX_PROCBASED_CTLS2_PAUSE_LOOP, ACRN currently panics when running
over QEMU. Remove this bit from the essential capability check; instead,
an error message will be printed if this bit is set while the hardware
lacks the capability.

v1-v2:
  remove is_valid_xsave_combination

Tracked-On: #6584
Signed-off-by: Mingqiang Chi <mingqiang.chi@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
This commit is contained in:
Mingqiang Chi 2021-12-02 15:40:06 +08:00 committed by wenlingz
parent 5c9456462b
commit 3555aae4ac
2 changed files with 2 additions and 26 deletions

View File

@ -65,8 +65,7 @@ static struct vmx_capability vmx_caps[] = {
{
MSR_IA32_VMX_PROCBASED_CTLS2, VMX_PROCBASED_CTLS2_VAPIC | VMX_PROCBASED_CTLS2_EPT |
VMX_PROCBASED_CTLS2_VPID | VMX_PROCBASED_CTLS2_RDTSCP |
VMX_PROCBASED_CTLS2_UNRESTRICT | VMX_PROCBASED_CTLS2_XSVE_XRSTR |
VMX_PROCBASED_CTLS2_PAUSE_LOOP
VMX_PROCBASED_CTLS2_UNRESTRICT
},
{
MSR_IA32_VMX_EXIT_CTLS, VMX_EXIT_CTLS_ACK_IRQ | VMX_EXIT_CTLS_SAVE_PAT |
@ -451,26 +450,6 @@ static inline bool pcpu_has_vmx_unrestricted_guest_cap(void)
return ((msr_read(MSR_IA32_VMX_MISC) & MSR_IA32_MISC_UNRESTRICTED_GUEST) != 0UL);
}
/*
 * Check that the CPU's XSAVE-related features and the VMX XSVE_XRSTR
 * secondary processor-based control are configured consistently:
 * either all present or all absent. Returns true when the combination
 * is valid.
 *
 * NOTE(review): this is the function the commit removes; the platform
 * is now assumed (or an offline tool is expected to verify) to provide
 * a consistent combination.
 */
static bool is_valid_xsave_combination(void)
{
/* Read the allowed settings for the secondary processor-based VM-execution controls. */
uint64_t value64 = msr_read(MSR_IA32_VMX_PROCBASED_CTLS2);
uint32_t high = (uint32_t)(value64 >> 32U); /* allowed 1-settings */
bool ret;
/* Now we only assume the platform must support XSAVE on CPU side and XSVE_XRSTR on VMX side or not,
 * in this case, we could pass through CR4.OSXSAVE bit.
 */
if (pcpu_has_cap(X86_FEATURE_XSAVE)) {
/* XSAVE present: require XSAVES, compaction extensions, and the
 * XSVE_XRSTR VMX control to all be available as well. */
ret = pcpu_has_cap(X86_FEATURE_XSAVES) && pcpu_has_cap(X86_FEATURE_COMPACTION_EXT) &&
((high & VMX_PROCBASED_CTLS2_XSVE_XRSTR) != 0U);
} else {
/* XSAVE absent: require the related features/controls to be absent too. */
ret = !pcpu_has_cap(X86_FEATURE_XSAVES) && !pcpu_has_cap(X86_FEATURE_COMPACTION_EXT) &&
((high & VMX_PROCBASED_CTLS2_XSVE_XRSTR) == 0U);
}
return ret;
}
static int32_t check_vmx_mmu_cap(void)
{
int32_t ret = 0;
@ -520,9 +499,6 @@ static int32_t check_essential_vmx_caps(void)
} else if (pcpu_vmx_set_32bit_addr_width()) {
printf("%s, Only support Intel 64 architecture.\n", __func__);
ret = -ENODEV;
} else if (!is_valid_xsave_combination()) {
printf("%s, check XSAVE combined Capability failed\n", __func__);
ret = -ENODEV;
} else {
for (i = 0U; i < ARRAY_SIZE(vmx_caps); i++) {
if (!is_vmx_cap_supported(vmx_caps[i].msr, vmx_caps[i].bits)) {

View File

@ -891,7 +891,7 @@ void rstore_xsave_area(const struct acrn_vcpu *vcpu, const struct ext_context *e
* 2. "vcpu->arch.xsave_enabled" is true (state restoring for guest)
*
* Before vCPU is launched, condition 1 is satisfied.
* After vCPU is launched, condition 2 is satisfied because is_valid_xsave_combination() guarantees
* After vCPU is launched, condition 2 is satisfied because
* that "vcpu->arch.xsave_enabled" is consistent with pcpu_has_cap(X86_FEATURE_XSAVES).
*
* Therefore, the check against "vcpu->launched" and "vcpu->arch.xsave_enabled" can be eliminated here.