hypervisor: use "wbinvd" carefully in RT environment

Because the "wbinvd" instruction has a heavy side effect on the cache,
only perform it when the VM uses non-coherent (non-snooped) DMA.

Tracked-On: #1824
Signed-off-by: Zheng, Gen <gen.zheng@intel.com>
Reviewed-by: Binbin Wu <binbin.wu@intel.com>
This commit is contained in:
Zheng, Gen 2018-11-14 22:03:04 +08:00 committed by lijinxia
parent 61e6c1f054
commit b32e689a64
4 changed files with 47 additions and 5 deletions

View File

@ -14,6 +14,7 @@
static int unhandled_vmexit_handler(struct acrn_vcpu *vcpu);
static int xsetbv_vmexit_handler(struct acrn_vcpu *vcpu);
static int wbinvd_vmexit_handler(struct acrn_vcpu *vcpu);
/* VM Dispatch table for Exit condition handling */
static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
@ -127,7 +128,7 @@ static const struct vm_exit_dispatch dispatch_table[NR_VMX_EXIT_REASONS] = {
[VMX_EXIT_REASON_INVVPID] = {
.handler = unhandled_vmexit_handler},
[VMX_EXIT_REASON_WBINVD] = {
.handler = unhandled_vmexit_handler},
.handler = wbinvd_vmexit_handler},
[VMX_EXIT_REASON_XSETBV] = {
.handler = xsetbv_vmexit_handler},
[VMX_EXIT_REASON_APIC_WRITE] = {
@ -360,3 +361,12 @@ static int xsetbv_vmexit_handler(struct acrn_vcpu *vcpu)
write_xcr(0, val64);
return 0;
}
/*
 * VM-exit handler for the guest WBINVD instruction.
 *
 * Flushing the whole physical cache hierarchy is expensive, so the
 * flush is performed only when the VM's IOMMU setup lacks snoop
 * control (i.e. its DMA is not cache-coherent); otherwise the exit is
 * treated as a no-op.  Always reports success to the dispatcher.
 */
static int wbinvd_vmexit_handler(struct acrn_vcpu *vcpu)
{
	if (iommu_snoop_supported(vcpu->vm)) {
		/* DMA is snooped/coherent: no physical flush required. */
		return 0;
	}

	cache_flush_invalidate_all();
	return 0;
}

View File

@ -423,7 +423,9 @@ void vmx_write_cr0(struct acrn_vcpu *vcpu, uint64_t cr0)
* disabled behavior
*/
exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, PAT_ALL_UC_VALUE);
if(!iommu_snoop_supported(vcpu->vm)) {
cache_flush_invalidate_all();
}
} else {
/* Restore IA32_PAT to enable cache again */
exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL,
@ -854,6 +856,8 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
value32 |= VMX_PROCBASED_CTLS2_XSVE_XRSTR;
}
value32 |= VMX_PROCBASED_CTLS2_WBINVD;
exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS2, value32);
pr_dbg("VMX_PROC_VM_EXEC_CONTROLS2: 0x%x ", value32);

View File

@ -131,6 +131,7 @@ struct iommu_domain {
uint16_t vm_id;
uint32_t addr_width; /* address width of the domain */
uint64_t trans_table_ptr;
bool iommu_snoop;
};
struct context_table {
@ -152,6 +153,15 @@ get_ctx_table(uint32_t dmar_index, uint8_t bus_no)
return ctx_tables[dmar_index].buses[bus_no].contents;
}
/*
 * Report whether cache snooping is in effect for the given VM.
 *
 * A VM with no iommu domain attached is treated as snoop-capable,
 * so callers may safely skip cache flushes in that case.
 */
bool iommu_snoop_supported(struct acrn_vm *vm)
{
	return (vm->iommu == NULL) || vm->iommu->iommu_snoop;
}
static struct dmar_drhd_rt dmar_drhd_units[CONFIG_MAX_IOMMU_NUM];
static struct iommu_domain *vm0_domain;
@ -892,6 +902,7 @@ struct iommu_domain *create_iommu_domain(uint16_t vm_id, uint64_t translation_ta
domain->trans_table_ptr = translation_table;
domain->addr_width = addr_width;
domain->is_tt_ept = true;
domain->iommu_snoop = true;
dev_dbg(ACRN_DBG_IOMMU, "create domain [%d]: vm_id = %hu, ept@0x%x",
vmid_to_domainid(domain->vm_id),
@ -915,7 +926,7 @@ void destroy_iommu_domain(struct iommu_domain *domain)
(void)memset(domain, 0U, sizeof(*domain));
}
static int add_iommu_device(const struct iommu_domain *domain, uint16_t segment,
static int add_iommu_device(struct iommu_domain *domain, uint16_t segment,
uint8_t bus, uint8_t devfun)
{
struct dmar_drhd_rt *dmar_uint;
@ -946,6 +957,12 @@ static int add_iommu_device(const struct iommu_domain *domain, uint16_t segment,
return 1;
}
if (iommu_ecap_sc(dmar_uint->ecap) == 0U) {
domain->iommu_snoop = false;
dev_dbg(ACRN_DBG_IOMMU, "vm=%d add %x:%x no snoop control!",
domain->vm_id, bus, devfun);
}
ASSERT(dmar_uint->root_table_addr != 0UL, "root table is not setup");
root_table =
@ -1104,7 +1121,7 @@ remove_iommu_device(const struct iommu_domain *domain, uint16_t segment,
return 0;
}
int assign_iommu_device(const struct iommu_domain *domain, uint8_t bus,
int assign_iommu_device(struct iommu_domain *domain, uint8_t bus,
uint8_t devfun)
{
/* TODO: check if the device assigned */

View File

@ -502,7 +502,7 @@ struct iommu_domain;
* @pre domain != NULL
*
*/
int assign_iommu_device(const struct iommu_domain *domain,
int assign_iommu_device(struct iommu_domain *domain,
uint8_t bus, uint8_t devfun);
/**
@ -612,6 +612,17 @@ int init_iommu(void);
*/
void init_iommu_vm0_domain(struct acrn_vm *vm0);
/**
* @brief Check whether the IOMMU serving the given VM supports cache snooping.
*
* @param[in] vm pointer to the VM to check
*
* @return true - cache snooping is supported
* @return false - cache snooping is not supported
*
*/
bool iommu_snoop_supported(struct acrn_vm *vm);
/**
* @}
*/