hv: support xsave in context switch

xsave area:
    legacy region: 512 bytes
    xsave header: 64 bytes
    extended region: < 3k bytes

So, pre-allocate a 4 KB area for xsave. Use the save/restore instruction
appropriate to the hardware XSAVE feature set (e.g. xsaves/xrstors when
supported) to save or restore the area.

Tracked-On: #4166
Signed-off-by: Conghui Chen <conghui.chen@intel.com>
Reviewed-by: Anthony Xu <anthony.xu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Conghui Chen
2019-11-25 18:57:36 +00:00
committed by wenlingz
parent 8ba203a165
commit e61412981d
10 changed files with 153 additions and 37 deletions

View File

@@ -150,7 +150,14 @@
/* Number of GPRs saved / restored for guest in VCPU structure */
#define NUM_GPRS 16U
#define GUEST_STATE_AREA_SIZE 512
#define XSAVE_STATE_AREA_SIZE 4096U
#define XSAVE_LEGACY_AREA_SIZE 512U
#define XSAVE_HEADER_AREA_SIZE 64U
#define XSAVE_EXTEND_AREA_SIZE (XSAVE_STATE_AREA_SIZE - \
XSAVE_HEADER_AREA_SIZE - \
XSAVE_LEGACY_AREA_SIZE)
#define XSAVE_COMPACTED_FORMAT (1UL << 63U)
#define CPU_CONTEXT_OFFSET_RAX 0U
#define CPU_CONTEXT_OFFSET_RCX 8U
@@ -180,9 +187,6 @@
#define CPU_CONTEXT_OFFSET_IDTR 192U
#define CPU_CONTEXT_OFFSET_LDTR 216U
/*sizes of various registers within the VCPU data structure */
#define VMX_CPU_S_FXSAVE_GUEST_AREA_SIZE GUEST_STATE_AREA_SIZE
#ifndef ASSEMBLER
#define AP_MASK (((1UL << get_pcpu_nums()) - 1UL) & ~(1UL << 0U))
@@ -344,6 +348,21 @@ struct run_context {
uint64_t ia32_efer;
};
/*
 * 64-byte XSAVE header that immediately follows the 512-byte legacy
 * region in an XSAVE area. Only the first two 64-bit words are named
 * here; the remaining words are accessible via 'value' (e.g. for
 * zero-initialization of the whole header).
 */
union xsave_header {
uint64_t value[XSAVE_HEADER_AREA_SIZE / sizeof(uint64_t)];
struct {
/* bytes 7:0 */
uint64_t xstate_bv;
/* bytes 15:8 — bit 63 is the compacted-format flag (XSAVE_COMPACTED_FORMAT) */
uint64_t xcomp_bv;
} hdr;
};
/*
 * Full XSAVE area: 512-byte legacy (FPU/MMX/SSE) region, 64-byte
 * header, then the extended region (sized so the whole area is
 * XSAVE_STATE_AREA_SIZE = 4 KB). The 64-byte alignment is required
 * by the xsave/xrstor family of instructions.
 */
struct xsave_area {
uint64_t legacy_region[XSAVE_LEGACY_AREA_SIZE / sizeof(uint64_t)];
union xsave_header xsave_hdr;
uint64_t extend_region[XSAVE_EXTEND_AREA_SIZE / sizeof(uint64_t)];
} __aligned(64);
/*
* extended context does not save/restore during vm exit/entry, it's mainly
* used in trusty world switch
@@ -377,10 +396,9 @@ struct ext_context {
uint64_t dr7;
uint64_t tsc_offset;
/* The 512 bytes area to save the FPU/MMX/SSE states for the guest */
uint64_t
fxstore_guest_area[VMX_CPU_S_FXSAVE_GUEST_AREA_SIZE / sizeof(uint64_t)]
__aligned(16);
struct xsave_area xs_area;
uint64_t xcr0;
uint64_t xss;
};
struct cpu_context {
@@ -607,6 +625,13 @@ static inline void write_xcr(int32_t reg, uint64_t val)
asm volatile("xsetbv" : : "c" (reg), "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32U)));
}
/*
 * Read the 64-bit extended control register selected by 'reg'.
 * xgetbv takes the register index in ECX and returns the value
 * split across EDX:EAX; recombine the halves here.
 */
static inline uint64_t read_xcr(int32_t reg)
{
	uint32_t lo, hi;

	asm volatile ("xgetbv ": "=a"(lo), "=d"(hi) : "c" (reg));

	return (((uint64_t)hi << 32U) | (uint64_t)lo);
}
/*
* stac/clac pair is used to access guest's memory protected by SMAP,
* following below flow:

View File

@@ -24,7 +24,12 @@
#define FEAT_8000_0001_EDX 6U /* CPUID[8000_0001].EDX */
#define FEAT_8000_0007_EDX 7U /* CPUID[8000_0007].EDX */
#define FEAT_8000_0008_EBX 8U /* CPUID[8000_0008].EBX */
#define FEATURE_WORDS 9U
#define FEAT_D_0_EAX 9U /* CPUID[D][0].EAX */
#define FEAT_D_0_EDX 10U /* CPUID[D][0].EDX */
#define FEAT_D_1_EAX 11U /* CPUID[D][1].EAX */
#define FEAT_D_1_ECX 13U /* CPUID[D][1].ECX */
#define FEAT_D_1_EDX 14U /* CPUID[D][1].EDX */
#define FEATURE_WORDS 15U
struct cpuinfo_x86 {
uint8_t family, model;

View File

@@ -94,4 +94,8 @@
/* Intel-defined CPU features, CPUID level 0x80000007 (EDX)*/
#define X86_FEATURE_INVA_TSC ((FEAT_8000_0007_EDX << 5U) + 8U)
/* Intel-defined CPU features, CPUID level 0x0000000D, sub 0x1 */
#define X86_FEATURE_COMPACTION_EXT ((FEAT_D_1_EAX << 5U) + 1U)
#define X86_FEATURE_XSAVES ((FEAT_D_1_EAX << 5U) + 3U)
#endif /* CPUFEATURES_H */

View File

@@ -113,6 +113,7 @@
#define CPUID_TLB 2U
#define CPUID_SERIALNUM 3U
#define CPUID_EXTEND_FEATURE 7U
#define CPUID_XSAVE_FEATURES 0xDU
#define CPUID_RSD_ALLOCATION 0x10U
#define CPUID_MAX_EXTENDED_FUNCTION 0x80000000U
#define CPUID_EXTEND_FUNCTION_1 0x80000001U

View File

@@ -544,20 +544,12 @@ static inline bool is_pae(struct acrn_vcpu *vcpu)
return (vcpu_get_cr4(vcpu) & CR4_PAE) != 0UL;
}
/*
 * Save the current FPU/MMX/SSE state into the guest's 512-byte
 * fxsave image in the extended context. The "memory" clobber keeps
 * the compiler from reordering accesses around the store.
 */
static inline void save_fxstore_guest_area(struct ext_context *ext_ctx)
{
asm volatile("fxsave (%0)"
: : "r" (ext_ctx->fxstore_guest_area) : "memory");
}
/*
 * Restore the FPU/MMX/SSE state from the guest's 512-byte fxsave
 * image in the extended context (counterpart of
 * save_fxstore_guest_area()).
 */
static inline void rstor_fxstore_guest_area(const struct ext_context *ext_ctx)
{
asm volatile("fxrstor (%0)" : : "r" (ext_ctx->fxstore_guest_area));
}
struct acrn_vcpu *get_running_vcpu(uint16_t pcpu_id);
struct acrn_vcpu* get_ever_run_vcpu(uint16_t pcpu_id);
void save_xsave_area(struct ext_context *ectx);
void rstore_xsave_area(const struct ext_context *ectx);
/**
* @brief create a vcpu for the target vm
*