Mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2025-06-24 14:33:38 +00:00)
hv: pass-through xsave feature to guests
Enable the XSAVE feature and pass it through to guests.

Update based on v2:
- enable host XSAVE before exposing it to guests.
- add validation of the value to be set in 'xcr0' before calling xsetbv when handling the xsetbv vmexit.
- tested in the SOS guest: created two threads doing different FP calculations; the test code runs in user land of the SOS.

Signed-off-by: Yonghua Huang <yonghua.huang@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
parent 677cb6f24a
commit a2bffc7630
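The commit message mentions a user-land test in the SOS with two threads doing different FP calculations. That test is not part of the commit; the following is only a minimal sketch of what such a check might look like (hypothetical file name xsave_fp_test.c, standard pthreads assumed). If x87/SSE state is saved and restored correctly across context switches, both threads' results stay stable across runs.

/* Hypothetical user-land smoke test, not part of this commit.
 * Build inside the guest: gcc -O2 -pthread -o xsave_fp_test xsave_fp_test.c */
#include <pthread.h>
#include <stdio.h>

static void *fp_worker(void *arg)
{
        double x = *(double *)arg;
        double sum = 0.0;
        long i;

        /* keep x87/SSE registers busy so their state must survive context switches */
        for (i = 1; i <= 50000000L; i++)
                sum += x / (double)i;
        *(double *)arg = sum;
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;
        double a = 1.0, b = 2.0;

        pthread_create(&t1, NULL, fp_worker, &a);
        pthread_create(&t2, NULL, fp_worker, &b);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        printf("thread results: %.15f %.15f\n", a, b);
        return 0;
}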
@@ -80,6 +80,7 @@ static struct cpu_capability cpu_caps;
 struct cpuinfo_x86 boot_cpu_data;
 
 static void vapic_cap_detect(void);
+static void cpu_xsave_init(void);
 static void cpu_set_logical_id(uint32_t logical_id);
 static void print_hv_banner(void);
 int cpu_find_logical_id(uint32_t lapic_id);
@@ -364,6 +365,8 @@ void bsp_boot_init(void)
 
        vapic_cap_detect();
 
+       cpu_xsave_init();
+
        /* Set state for this CPU to initializing */
        cpu_set_current_state(CPU_BOOT_ID, CPU_STATE_INITIALIZING);
 
@@ -490,6 +493,8 @@ void cpu_secondary_init(void)
 
        pr_dbg("Core %d is up", get_cpu_id());
 
+       cpu_xsave_init();
+
        /* Release secondary boot spin-lock to allow one of the next CPU(s) to
         * perform this common initialization
         */
@@ -699,3 +704,26 @@ bool is_vapic_virt_reg_supported(void)
 {
        return ((cpu_caps.vapic_features & VAPIC_FEATURE_VIRT_REG) != 0);
 }
+
+bool is_xsave_supported(void)
+{
+       /*
+        * TODO:
+        * the flag below should also be tested, but currently it is false
+        * because it is not updated after turning on the host's CR4.OSXSAVE
+        * bit; this will be fixed in a cpuid-related patch.
+        * boot_cpu_data.cpuid_leaves[FEAT_1_ECX] & CPUID_ECX_OSXSAVE
+        */
+       return !!(boot_cpu_data.cpuid_leaves[FEAT_1_ECX] & CPUID_ECX_XSAVE);
+}
+
+static void cpu_xsave_init(void)
+{
+       uint64_t val64;
+
+       if (is_xsave_supported()) {
+               CPU_CR_READ(cr4, &val64);
+               val64 |= CR4_OSXSAVE;
+               CPU_CR_WRITE(cr4, val64);
+       }
+}
@@ -285,7 +285,7 @@ void guest_cpuid(struct vcpu *vcpu,
        uint32_t subleaf = *ecx;
 
        /* vm related */
-       if (leaf != 0x1 && leaf != 0xb) {
+       if (leaf != 0x1 && leaf != 0xb && leaf != 0xd) {
                struct vcpuid_entry *entry =
                        find_vcpuid_entry(vcpu, leaf, subleaf);
 
@@ -329,6 +329,18 @@ void guest_cpuid(struct vcpu *vcpu,
                /*mask vmx to guest os */
                *ecx &= ~CPUID_ECX_VMX;
 
+               /*no xsave support for guest if it is not enabled on host*/
+               if (!(*ecx & CPUID_ECX_OSXSAVE))
+                       *ecx &= ~CPUID_ECX_XSAVE;
+
+               *ecx &= ~CPUID_ECX_OSXSAVE;
+               if (*ecx & CPUID_ECX_XSAVE) {
+                       uint64_t cr4;
+                       /*read guest CR4*/
+                       cr4 = exec_vmread(VMX_GUEST_CR4);
+                       if (cr4 & CR4_OSXSAVE)
+                               *ecx |= CPUID_ECX_OSXSAVE;
+               }
                break;
        }
 
@@ -343,6 +355,16 @@ void guest_cpuid(struct vcpu *vcpu,
                cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
                break;
 
+       case 0x0d:
+               if (!is_xsave_supported()) {
+                       *eax = 0;
+                       *ebx = 0;
+                       *ecx = 0;
+                       *edx = 0;
+               } else
+                       cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
+               break;
+
        default:
                break;
        }
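For reference, the CPUID and XSETBV handling added above is what a guest kernel exercises when it turns XSAVE on for itself. A minimal sketch of that guest-side sequence follows (illustrative names, ring-0 code; the bit positions are the architectural CPUID.1:ECX.XSAVE = bit 26 and CR4.OSXSAVE = bit 18). The final xsetbv is the instruction that now reaches the hypervisor's xsetbv vmexit handler shown further below.

/* Illustrative guest-kernel enable sequence, not part of this commit. */
static inline uint64_t xgetbv0(void)
{
        uint32_t lo, hi;

        asm volatile("xgetbv" : "=a" (lo), "=d" (hi) : "c" (0));
        return ((uint64_t)hi << 32) | lo;
}

static void guest_enable_xsave(void)
{
        uint32_t eax, ebx, ecx, edx;
        uint64_t cr4, xcr0;

        /* CPUID.1:ECX.XSAVE (bit 26) is passed through once the host enables XSAVE */
        asm volatile("cpuid"
                     : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                     : "a" (1), "c" (0));
        if (!(ecx & (1U << 26)))
                return;

        /* set CR4.OSXSAVE (bit 18) so XGETBV/XSETBV become legal ... */
        asm volatile("mov %%cr4, %0" : "=r" (cr4));
        cr4 |= (1UL << 18);
        asm volatile("mov %0, %%cr4" : : "r" (cr4));

        /* ... then program XCR0 (index 0); x87 (bit 0) and SSE (bit 1) */
        xcr0 = xgetbv0() | 0x3;
        asm volatile("xsetbv"
                     : : "c" (0), "a" ((uint32_t)xcr0), "d" ((uint32_t)(xcr0 >> 32)));
}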
@@ -37,6 +37,7 @@
 static int rdtscp_handler(struct vcpu *vcpu);
 static int unhandled_vmexit_handler(struct vcpu *vcpu);
 static int rdtsc_handler(struct vcpu *vcpu);
+static int xsetbv_vmexit_handler(struct vcpu *vcpu);
 /* VM Dispatch table for Exit condition handling */
 static const struct vm_exit_dispatch dispatch_table[] = {
        [VMX_EXIT_REASON_EXCEPTION_OR_NMI] = {
@@ -151,7 +152,7 @@ static const struct vm_exit_dispatch dispatch_table[] = {
        [VMX_EXIT_REASON_WBINVD] = {
                .handler = unhandled_vmexit_handler},
        [VMX_EXIT_REASON_XSETBV] = {
-               .handler = unhandled_vmexit_handler},
+               .handler = xsetbv_vmexit_handler},
        [VMX_EXIT_REASON_APIC_WRITE] = {
                .handler = apic_write_vmexit_handler,
                .need_exit_qualification = 1}
@@ -418,26 +419,57 @@ int invlpg_handler(__unused struct vcpu *vcpu)
 
        return 0;
 }
+#endif
 
 /*
- * XSETBV instruction set's the XCR0 that is used to tell for which components
- * states can be saved on a context switch using xsave.
- *
- * We don't handle this right now because we are on a platform that does not
- * support XSAVE/XRSTORE feature as reflected by the instruction CPUID.
- *
- * to make sure this never get called until we support it we can prevent the
- * reading of this bit in CPUID VMEXIT.
- *
- * Linux checks this in CPUID: cpufeature.h: #define cpu_has_xsave
+ * The XSETBV instruction sets XCR0, which tells which components'
+ * state can be saved on a context switch using xsave.
  */
-static int xsetbv_instr_handler(__unused struct vcpu *vcpu)
+static int xsetbv_vmexit_handler(struct vcpu *vcpu)
 {
-       ASSERT("Not Supported" == 0, "XSETBV executed");
+       int idx;
+       uint64_t val64;
+       struct run_context *ctx_ptr;
 
+       val64 = exec_vmread(VMX_GUEST_CR4);
+       if (!(val64 & CR4_OSXSAVE)) {
+               vcpu_inject_gp(vcpu);
+               return -1;
+       }
+
+       idx = vcpu->arch_vcpu.cur_context;
+       if (idx >= NR_WORLD)
+               return -1;
+
+       ctx_ptr = &(vcpu->arch_vcpu.contexts[idx]);
+
+       /*to access XCR0, 'rcx' should be 0*/
+       if (ctx_ptr->guest_cpu_regs.regs.rcx != 0) {
+               vcpu_inject_gp(vcpu);
+               return -1;
+       }
+
+       val64 = ((ctx_ptr->guest_cpu_regs.regs.rax) & 0xffffffff) |
+                       (ctx_ptr->guest_cpu_regs.regs.rdx << 32);
+
+       /*bit 0 (x87 state) of XCR0 can't be cleared*/
+       if (!(val64 & 0x01)) {
+               vcpu_inject_gp(vcpu);
+               return -1;
+       }
+
+       /*XCR0[2:1] (SSE state & AVX state) can't be
+        *set to 10b, as it is necessary to set both bits
+        *to use AVX instructions.
+        */
+       if (((val64 >> 1) & 0x3) == 0x2) {
+               vcpu_inject_gp(vcpu);
+               return -1;
+       }
+
+       write_xcr(0, val64);
        return 0;
 }
-#endif
 
 static int rdtsc_handler(struct vcpu *vcpu)
 {
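Beyond requiring the guest's CR4.OSXSAVE to be set and RCX to select XCR0 (index 0), the handler above enforces two architectural rules on the value the guest places in EDX:EAX. A standalone restatement of those checks, with example values, follows; the helper name is illustrative and not part of the commit.

#include <stdint.h>

/* Returns 0 if the handler above would accept the XCR0 value,
 * -1 if it would inject #GP into the guest. */
static int xcr0_value_ok(uint64_t xcr0)
{
        if (!(xcr0 & 0x1))                      /* x87 state (bit 0) must stay set */
                return -1;
        if (((xcr0 >> 1) & 0x3) == 0x2)         /* AVX (bit 2) without SSE (bit 1) */
                return -1;
        return 0;
}

/* Examples: 0x1 (x87), 0x3 (x87|SSE) and 0x7 (x87|SSE|AVX) are accepted;
 * 0x0, 0x2 (no x87) and 0x5 (AVX without SSE) get #GP. */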
@@ -931,6 +931,11 @@ static void init_exec_ctrl(struct vcpu *vcpu)
                exec_vmwrite(VMX_TPR_THRESHOLD, 0);
        }
 
+       if (is_xsave_supported()) {
+               exec_vmwrite64(VMX_XSS_EXITING_BITMAP_FULL, 0);
+               value32 |= VMX_PROCBASED_CTLS2_XSVE_XRSTR;
+       }
+
        exec_vmwrite(VMX_PROC_VM_EXEC_CONTROLS2, value32);
        pr_dbg("VMX_PROC_VM_EXEC_CONTROLS2: 0x%x ", value32);
 
@@ -247,6 +247,7 @@ bool is_vapic_supported(void);
 bool is_vapic_intr_delivery_supported(void);
 bool is_vapic_virt_reg_supported(void);
 bool get_vmx_cap(void);
+bool is_xsave_supported(void);
 
 /* Read control register */
 #define CPU_CR_READ(cr, result_ptr) \
@@ -427,6 +428,15 @@ msr_write(uint32_t reg_num, uint64_t value64)
        CPU_MSR_WRITE(reg_num, value64);
 }
 
+static inline void
+write_xcr(int reg, uint64_t val)
+{
+       uint32_t low, high;
+
+       low = val;
+       high = val >> 32;
+       asm volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
+}
 #else /* ASSEMBLER defined */
 
 #endif /* ASSEMBLER defined */
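write_xcr() only covers the store direction; the commit adds no read helper. If one were wanted, a sketch of the obvious counterpart (hypothetical name read_xcr, same style, using xgetbv) might look like this:

static inline uint64_t
read_xcr(int reg)
{
        uint32_t low, high;

        asm volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));
        return (((uint64_t)high << 32) | low);
}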
@@ -83,6 +83,8 @@
 #define VMX_EOI_EXIT3_FULL      0x00002022
 #define VMX_EOI_EXIT3_HIGH      0x00002023
 #define VMX_EOI_EXIT(vector)    (VMX_EOI_EXIT0_FULL + ((vector) / 64) * 2)
+#define VMX_XSS_EXITING_BITMAP_FULL     0x0000202C
+#define VMX_XSS_EXITING_BITMAP_HIGH     0x0000202D
 /* 64-bit read-only data fields */
 #define VMX_GUEST_PHYSICAL_ADDR_FULL    0x00002400
 #define VMX_GUEST_PHYSICAL_ADDR_HIGH    0x00002401