hv: refine virtualization flow for cr0 and cr4

- The current code that virtualizes CR0/CR4 is not well designed
  and is hard to read. This patch reshuffles the logic to make it
  clear, and classifies the affected bits into PASSTHRU,
  TRAP_AND_PASSTHRU, TRAP_AND_EMULATE and reserved bits (see the
  sketch after the commit metadata below).

Tracked-On: #5586
Signed-off-by: Eddie Dong <eddie.dong@intel.com>
Signed-off-by: Yonghua Huang <yonghua.huang@intel.com>
Yonghua Huang
2020-12-09 21:45:12 +08:00
committed by wenlingz
parent f31be45df0
commit 442fc30117
9 changed files with 316 additions and 198 deletions
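
To illustrate the classification named in the commit message, here is a
minimal C sketch. The mask names and the example bits chosen per category
are assumptions for illustration only; likewise, treating "reserved" as the
complement of the other three categories is an assumption of this sketch.
Only the four category names come from the commit message itself.

/*
 * Minimal sketch of the CR0 bit classification described above.
 * Mask names and per-category bit assignments are hypothetical;
 * the real assignments live in the patched vCR code.
 */
#include <stdbool.h>
#include <stdint.h>

/* Architectural CR0 bit positions (Intel SDM). */
#define CR0_PE    (1UL << 0U)   /* protection enable */
#define CR0_MP    (1UL << 1U)   /* monitor coprocessor */
#define CR0_TS    (1UL << 3U)   /* task switched */
#define CR0_PG    (1UL << 31U)  /* paging */

/* Hypothetical per-category masks for guest CR0 writes. */
#define CR0_PASSTHRU_BITS           (CR0_MP | CR0_TS)
#define CR0_TRAP_AND_PASSTHRU_BITS  (CR0_PE | CR0_PG)
#define CR0_TRAP_AND_EMULATE_BITS   (0UL)

/* Everything outside the three categories is treated as reserved. */
#define CR0_RESERVED_BITS \
	(~(CR0_PASSTHRU_BITS | CR0_TRAP_AND_PASSTHRU_BITS | \
	   CR0_TRAP_AND_EMULATE_BITS))

/*
 * A trapped guest write touching a reserved bit is rejected
 * (the hypervisor would inject #GP(0) into the guest).
 */
static inline bool cr0_write_is_valid(uint64_t value)
{
	return ((value & CR0_RESERVED_BITS) == 0UL);
}

Under this scheme, PASSTHRU bits would be left out of the VMX CR0
guest/host mask so guest writes do not cause a VM exit, while both TRAP_*
categories are set in the mask; that reading matches the renamed
init_cr0_cr4_host_guest_mask() declaration in the hunks below.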


@@ -536,6 +536,8 @@ void set_vcpu_regs(struct acrn_vcpu *vcpu, struct acrn_vcpu_regs *vcpu_regs);
  */
 void reset_vcpu_regs(struct acrn_vcpu *vcpu);
 
+bool sanitize_cr0_cr4_pattern(void);
+
 /**
  * @brief Initialize the protect mode vcpu registers
  *


@@ -12,8 +12,8 @@
  *
  * @brief public APIs for vCR operations
  */
 
-void init_cr0_cr4_host_mask(void);
-uint64_t get_cr4_reserved_bits(void);
+void init_cr0_cr4_host_guest_mask(void);
+
 /**
  * @brief vCR from vcpu


@@ -431,5 +431,8 @@ void exec_vmwrite64(uint32_t field_full, uint64_t value);
 void exec_vmclear(void *addr);
 void exec_vmptrld(void *addr);
 
+void init_cr0_cr4_flexible_bits(void);
+bool is_valid_cr0_cr4(uint64_t cr0, uint64_t cr4);
+
 #define POSTED_INTR_ON 0U
 #endif /* VMX_H_ */
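
Read together, the three hunks suggest a boot-time flow that probes the
hardware, validates the required CR0/CR4 pattern, and programs the VMX
guest/host masks, plus a runtime validity check on trapped CR writes. The
sketch below is one plausible wiring under that assumption; only the four
extern declarations come from this patch, while both caller functions and
their error handling are invented for illustration.

/*
 * Plausible call sequence for the APIs declared in the hunks above.
 * The two caller functions are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

void init_cr0_cr4_flexible_bits(void);              /* from vmx header hunk */
bool is_valid_cr0_cr4(uint64_t cr0, uint64_t cr4);  /* from vmx header hunk */
bool sanitize_cr0_cr4_pattern(void);                /* from vcpu header hunk */
void init_cr0_cr4_host_guest_mask(void);            /* from vCR header hunk */

/* Hypothetical boot-time hook. */
static bool cr_virtualization_init(void)
{
	bool ok;

	/* Probe which CR0/CR4 bits VMX lets the guest own. */
	init_cr0_cr4_flexible_bits();

	/* Verify the required fixed-bit pattern can be satisfied. */
	ok = sanitize_cr0_cr4_pattern();
	if (ok) {
		/* Program the VMX CR0/CR4 guest/host masks. */
		init_cr0_cr4_host_guest_mask();
	}
	return ok; /* caller would fail boot on false */
}

/* Hypothetical VM-exit path for a trapped guest CR write. */
static bool cr_write_allowed(uint64_t cr0, uint64_t cr4)
{
	/* An invalid combination would lead to #GP(0) injection. */
	return is_valid_cr0_cr4(cr0, cr4);
}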